From 7aa79f948749da7de3de0c427e9c9ee0ff595243 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:20 +0200 Subject: [S390] vdso: kernel parameter syntax The syntax of the vdso kernel parameter is documented as vdso=[on|off]. The implementation uses vdso=[0|1], an invalid parameter string disables the vdso support. Fix the mismatch by adding vdso=[on|off] as additional parameter syntax. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/vdso.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 89b2e7f1b7a9..7b76ee4fb16c 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -53,8 +53,19 @@ unsigned int __read_mostly vdso_enabled = 1; static int __init vdso_setup(char *s) { - vdso_enabled = simple_strtoul(s, NULL, 0); - return 1; + unsigned long val; + int rc; + + rc = 0; + if (strncmp(s, "on", 3) == 0) + vdso_enabled = 1; + else if (strncmp(s, "off", 4) == 0) + vdso_enabled = 0; + else { + rc = strict_strtoul(s, 0, &val); + vdso_enabled = rc ? 0 : !!val; + } + return !rc; } __setup("vdso=", vdso_setup); -- cgit v1.2.3 From 76d4e00a05d06c1d1552adea24fcf6182c9d8999 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:21 +0200 Subject: [S390] merge cpu.h into cputime.h All definition in cpu.h have to do with cputime accounting. Move them to cputime.h and remove the header file. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cpu.h | 32 -------------------------------- arch/s390/include/asm/cputime.h | 19 +++++++++++++++++++ arch/s390/kernel/nmi.c | 2 +- arch/s390/kernel/s390_ext.c | 2 +- arch/s390/kernel/smp.c | 2 +- arch/s390/kernel/vtime.c | 2 +- drivers/s390/cio/cio.c | 2 +- 7 files changed, 24 insertions(+), 37 deletions(-) delete mode 100644 arch/s390/include/asm/cpu.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h deleted file mode 100644 index d60a2eefb17b..000000000000 --- a/arch/s390/include/asm/cpu.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * include/asm-s390/cpu.h - * - * Copyright IBM Corp. 2007 - * Author(s): Heiko Carstens - */ - -#ifndef _ASM_S390_CPU_H_ -#define _ASM_S390_CPU_H_ - -#include -#include -#include - -struct s390_idle_data { - spinlock_t lock; - unsigned long long idle_count; - unsigned long long idle_enter; - unsigned long long idle_time; -}; - -DECLARE_PER_CPU(struct s390_idle_data, s390_idle); - -void vtime_start_cpu(void); - -static inline void s390_idle_check(void) -{ - if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) - vtime_start_cpu(); -} - -#endif /* _ASM_S390_CPU_H_ */ diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 941384fbd39c..ec917d42ee6d 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -9,6 +9,9 @@ #ifndef _S390_CPUTIME_H #define _S390_CPUTIME_H +#include +#include +#include #include /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. 
*/ @@ -174,8 +177,24 @@ cputime64_to_clock_t(cputime64_t cputime) return __div(cputime, 4096000000ULL / USER_HZ); } +struct s390_idle_data { + spinlock_t lock; + unsigned long long idle_count; + unsigned long long idle_enter; + unsigned long long idle_time; +}; + +DECLARE_PER_CPU(struct s390_idle_data, s390_idle); + +void vtime_start_cpu(void); cputime64_t s390_get_idle_time(int cpu); #define arch_idle_time(cpu) s390_get_idle_time(cpu) +static inline void s390_idle_check(void) +{ + if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) + vtime_start_cpu(); +} + #endif /* _S390_CPUTIME_H */ diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 28cf196ba775..015e27da40eb 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index a0d2d55d7fb3..6b0686d78fc7 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a985a3ba4401..0af302ceac2d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include "entry.h" diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c87f59bd8246..c8eb7255332b 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include static ext_int_info_t ext_int_info_timer; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 2aebb9823044..9889f188c7c5 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From ce58ae6f7f6bdd48c87472ff830a6d586ff076b2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:22 +0200 Subject: [S390] implement interrupt-enabling rwlocks arch backend for f5f7eac41db827a47b2163330eecd7bb55ae9f12 "Allow rwlocks to re-enable interrupts". 
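The heart of the change can be sketched in plain C (illustrative only; the real s390 helpers with their compare-and-swap loops appear in the diff below): while a *_lock_irqsave() caller waits for a contended rwlock, interrupts are re-enabled from the saved flags and disabled again only right before the acquisition attempt.

	/* Hypothetical, self-contained sketch; not the actual kernel code. */
	typedef struct { volatile unsigned int lock; } sketch_rwlock_t;

	extern void sketch_irq_restore(unsigned long flags);	/* stand-ins for local_irq_restore() */
	extern void sketch_irq_disable(void);			/* and local_irq_disable()           */
	extern void sketch_cpu_relax(void);

	static void sketch_write_lock_wait_flags(sketch_rwlock_t *rw, unsigned long flags)
	{
		sketch_irq_restore(flags);		/* waiters may now take interrupts */
		for (;;) {
			if (rw->lock != 0) {		/* still held by readers or a writer */
				sketch_cpu_relax();
				continue;
			}
			sketch_irq_disable();		/* the lock must be taken with irqs off */
			if (__sync_bool_compare_and_swap(&rw->lock, 0, 0x80000000u))
				return;			/* acquired; interrupts stay disabled */
			sketch_irq_restore(flags);	/* lost the race, keep spinning with irqs on */
		}
	}
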
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/spinlock.h | 19 ++++++++++++++++--- arch/s390/lib/spinlock.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index f3861b09ebb0..c9af0d19c7ab 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -122,8 +122,10 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp) #define __raw_write_can_lock(x) ((x)->lock == 0) extern void _raw_read_lock_wait(raw_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); extern int _raw_read_trylock_retry(raw_rwlock_t *lp); extern void _raw_write_lock_wait(raw_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); extern int _raw_write_trylock_retry(raw_rwlock_t *lp); static inline void __raw_read_lock(raw_rwlock_t *rw) @@ -134,6 +136,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } +static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + _raw_read_lock_wait_flags(rw, flags); +} + static inline void __raw_read_unlock(raw_rwlock_t *rw) { unsigned int old, cmp; @@ -151,6 +161,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) _raw_write_lock_wait(rw); } +static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +{ + if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + _raw_write_lock_wait_flags(rw, flags); +} + static inline void __raw_write_unlock(raw_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); @@ -172,9 +188,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return _raw_write_trylock_retry(rw); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) - #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index e41f4008afc5..f7e0d30250b7 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -124,6 +124,27 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); +void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +{ + unsigned int old; + int count = spin_retry; + + local_irq_restore(flags); + while (1) { + if (count-- <= 0) { + _raw_yield(); + count = spin_retry; + } + if (!__raw_read_can_lock(rw)) + continue; + old = rw->lock & 0x7fffffffU; + local_irq_disable(); + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return; + } +} +EXPORT_SYMBOL(_raw_read_lock_wait_flags); + int _raw_read_trylock_retry(raw_rwlock_t *rw) { unsigned int old; @@ -157,6 +178,25 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); +void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +{ + int count = spin_retry; + + local_irq_restore(flags); + while (1) { + if (count-- <= 0) { + _raw_yield(); + count = spin_retry; + } + if (!__raw_write_can_lock(rw)) + continue; + local_irq_disable(); + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return; + } +} +EXPORT_SYMBOL(_raw_write_lock_wait_flags); + int _raw_write_trylock_retry(raw_rwlock_t *rw) { int count = spin_retry; 
-- cgit v1.2.3 From 8c4caa4fbfc18aa50d9d682f502303a8e0d72726 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:23 +0200 Subject: [S390] use facility list for cpu type safety check Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 1 + arch/s390/kernel/head.S | 49 ++++++++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 15 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 3aeca492b147..713fe9fb1fcc 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -30,6 +30,7 @@ #define __LC_SUBCHANNEL_NR 0x00ba #define __LC_IO_INT_PARM 0x00bc #define __LC_IO_INT_WORD 0x00c0 +#define __LC_STFL_FAC_LIST 0x00c8 #define __LC_MCCK_CODE 0x00e8 #define __LC_DUMP_REIPL 0x0e00 diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 22596d70fc2e..bf8cf1caeffc 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -479,27 +479,46 @@ startup:basr %r13,0 # get base mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) #ifndef CONFIG_MARCH_G5 - # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} - stidp __LC_CPUID # store cpuid - lhi %r0,(3f-2f) / 2 - la %r1,2f-.LPG0(%r13) -0: clc __LC_CPUID+4(2),0(%r1) - jne 3f - lpsw 1f-.LPG0(13) # machine type not good enough, crash + # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} + xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST + stfl __LC_STFL_FAC_LIST # store facility list + tm __LC_STFL_FAC_LIST,0x01 # stfle available ? + jz 0f + la %r0,0 + .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended +0: l %r0,__LC_STFL_FAC_LIST + n %r0,2f+8-.LPG0(%r13) + cl %r0,2f+8-.LPG0(%r13) + jne 1f + l %r0,__LC_STFL_FAC_LIST+4 + n %r0,2f+12-.LPG0(%r13) + cl %r0,2f+12-.LPG0(%r13) + je 3f +1: lpsw 2f-.LPG0(13) # machine type not good enough, crash .align 16 -1: .long 0x000a0000,0x00000000 -2: +2: .long 0x000a0000,0x8badcccc +#if defined(CONFIG_64BIT) +#if defined(CONFIG_MARCH_Z10) + .long 0xc100efe3, 0xf0680000 +#elif defined(CONFIG_MARCH_Z9_109) + .long 0xc100efc3, 0x00000000 +#elif defined(CONFIG_MARCH_Z990) + .long 0xc0002000, 0x00000000 +#elif defined(CONFIG_MARCH_Z900) + .long 0xc0000000, 0x00000000 +#endif +#else #if defined(CONFIG_MARCH_Z10) - .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096 + .long 0x8100c880, 0x00000000 #elif defined(CONFIG_MARCH_Z9_109) - .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086 + .long 0x8100c880, 0x00000000 #elif defined(CONFIG_MARCH_Z990) - .short 0x9672, 0x2064, 0x2066 + .long 0x80002000, 0x00000000 #elif defined(CONFIG_MARCH_Z900) - .short 0x9672 + .long 0x80000000, 0x00000000 +#endif #endif -3: la %r1,2(%r1) - brct %r0,0b +3: #endif l %r13,4f-.LPG0(%r13) -- cgit v1.2.3 From d90cbd469c9c7c1494fc2084af9319e6a557368b Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:24 +0200 Subject: [S390] add mini sclp driver This adds a mini sclp device driver for very early use. The primary and probably only use will be to emit a message to the console if the cpu doesn't provide the minimum required capabilities to run the kernel. After printing the message a disabled wait will be loaded and the machine stops operating. Printing the message is also part of this patch. 
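Seen from C, the new facility boils down to a single early-boot entry point; a hypothetical caller would look like the sketch below (the real caller added by this series is assembler code in head.S, shown further down).

	/* Prototype as implied by the assembler comments: string in, 0/1 result out. */
	extern int _sclp_print_early(const char *str);

	static void report_unsupported_cpu_sketch(void)
	{
		_sclp_print_early("The Linux kernel requires more recent processor hardware");
		/* head.S then loads a disabled wait PSW, so this point is never passed. */
	}
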
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/head.S | 18 ++- arch/s390/kernel/sclp.S | 327 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 342 insertions(+), 5 deletions(-) create mode 100644 arch/s390/kernel/sclp.S (limited to 'arch/s390') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 228e3105ded7..0657de7944fe 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -22,7 +22,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o sysinfo.o nmi.o + vdso.o vtime.o sysinfo.o nmi.o sclp.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index bf8cf1caeffc..6d227413cbe7 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/head.S - * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 1999,2009 * * Author(s): Hartmut Penner * Martin Schwidefsky @@ -494,7 +492,19 @@ startup:basr %r13,0 # get base n %r0,2f+12-.LPG0(%r13) cl %r0,2f+12-.LPG0(%r13) je 3f -1: lpsw 2f-.LPG0(13) # machine type not good enough, crash +1: l %r15,.Lstack-.LPG0(%r13) + ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE + ahi %r15,-96 + la %r2,.Lals_string-.LPG0(%r13) + l %r3,.Lsclp_print-.LPG0(%r13) + basr %r14,%r3 + lpsw 2f-.LPG0(%r13) # machine type not good enough, crash +.Lals_string: + .asciz "The Linux kernel requires more recent processor hardware" +.Lsclp_print: + .long _sclp_print_early +.Lstack: + .long init_thread_union .align 16 2: .long 0x000a0000,0x8badcccc #if defined(CONFIG_64BIT) diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S new file mode 100644 index 000000000000..20639dfe0c42 --- /dev/null +++ b/arch/s390/kernel/sclp.S @@ -0,0 +1,327 @@ +/* + * Mini SCLP driver. + * + * Copyright IBM Corp. 2004,2009 + * + * Author(s): Peter Oberparleiter , + * Heiko Carstens , + * + */ + +LC_EXT_NEW_PSW = 0x58 # addr of ext int handler +LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter +LC_EXT_INT_CODE = 0x86 # addr of ext int code + +# +# Subroutine which waits synchronously until either an external interruption +# or a timeout occurs. 
+# +# Parameters: +# R2 = 0 for no timeout, non-zero for timeout in (approximated) seconds +# +# Returns: +# R2 = 0 on interrupt, 2 on timeout +# R3 = external interruption parameter if R2=0 +# + +.section ".init.text","ax" + +_sclp_wait_int: + stm %r6,%r15,24(%r15) # save registers + basr %r13,0 # get base register +.LbaseS1: + ahi %r15,-96 # create stack frame + la %r8,LC_EXT_NEW_PSW # register int handler + mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) + mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) + lhi %r6,0x0200 # cr mask for ext int (cr0.54) + ltr %r2,%r2 + jz .LsetctS1 + ahi %r6,0x0800 # cr mask for clock int (cr0.52) + stck .LtimeS1-.LbaseS1(%r13) # initiate timeout + al %r2,.LtimeS1-.LbaseS1(%r13) + st %r2,.LtimeS1-.LbaseS1(%r13) + sckc .LtimeS1-.LbaseS1(%r13) + +.LsetctS1: + stctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # enable required interrupts + l %r0,.LctlS1-.LbaseS1(%r13) + lhi %r1,~(0x200 | 0x800) # clear old values + nr %r1,%r0 + or %r1,%r6 # set new value + st %r1,.LctlS1-.LbaseS1(%r13) + lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) + st %r0,.LctlS1-.LbaseS1(%r13) + lhi %r2,2 # return code for timeout +.LloopS1: + lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt +.LwaitS1: + lh %r7,LC_EXT_INT_CODE + chi %r7,0x1004 # timeout? + je .LtimeoutS1 + chi %r7,0x2401 # service int? + jne .LloopS1 + sr %r2,%r2 + l %r3,LC_EXT_INT_PARAM +.LtimeoutS1: + lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting + # restore old handler + mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) + lm %r6,%r15,120(%r15) # restore registers + br %r14 # return to caller + + .align 8 +.LoldpswS1: + .long 0, 0 # old ext int PSW +.LextpswS1: + .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int +.LwaitpswS1: + .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int +.LtimeS1: + .quad 0 # current time +.LctlS1: + .long 0 # CT0 contents + +# +# Subroutine to synchronously issue a service call. +# +# Parameters: +# R2 = command word +# R3 = sccb address +# +# Returns: +# R2 = 0 on success, 1 on failure +# R3 = sccb response code if R2 = 0 +# + +_sclp_servc: + stm %r6,%r15,24(%r15) # save registers + ahi %r15,-96 # create stack frame + lr %r6,%r2 # save command word + lr %r7,%r3 # save sccb address +.LretryS2: + lhi %r2,1 # error return code + .insn rre,0xb2200000,%r6,%r7 # servc + brc 1,.LendS2 # exit if not operational + brc 8,.LnotbusyS2 # go on if not busy + sr %r2,%r2 # wait until no longer busy + bras %r14,_sclp_wait_int + j .LretryS2 # retry +.LnotbusyS2: + sr %r2,%r2 # wait until result + bras %r14,_sclp_wait_int + sr %r2,%r2 + lh %r3,6(%r7) +.LendS2: + lm %r6,%r15,120(%r15) # restore registers + br %r14 + +# +# Subroutine to set up the SCLP interface. +# +# Parameters: +# R2 = 0 to activate, non-zero to deactivate +# +# Returns: +# R2 = 0 on success, non-zero on failure +# + +_sclp_setup: + stm %r6,%r15,24(%r15) # save registers + ahi %r15,-96 # create stack frame + basr %r13,0 # get base register +.LbaseS3: + l %r6,.LsccbS0-.LbaseS3(%r13) # prepare init mask sccb + mvc 0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13) + ltr %r2,%r2 # initialization? + jz .LdoinitS3 # go ahead + # clear masks + xc .LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6) +.LdoinitS3: + l %r2,.LwritemaskS3-.LbaseS3(%r13)# get command word + lr %r3,%r6 # get sccb address + bras %r14,_sclp_servc # issue service call + ltr %r2,%r2 # servc successful? + jnz .LerrorS3 + chi %r3,0x20 # write mask successful? + jne .LerrorS3 + # check masks + la %r2,.LinitmaskS3-.LinitsccbS3(%r6) + l %r1,0(%r2) # receive mask ok? 
+ n %r1,12(%r2) + cl %r1,0(%r2) + jne .LerrorS3 + l %r1,4(%r2) # send mask ok? + n %r1,8(%r2) + cl %r1,4(%r2) + sr %r2,%r2 + je .LendS3 +.LerrorS3: + lhi %r2,1 # error return code +.LendS3: + lm %r6,%r15,120(%r15) # restore registers + br %r14 +.LwritemaskS3: + .long 0x00780005 # SCLP command for write mask +.LinitsccbS3: + .word .LinitendS3-.LinitsccbS3 + .byte 0,0,0,0 + .word 0 + .word 0 + .word 4 +.LinitmaskS3: + .long 0x80000000 + .long 0x40000000 + .long 0 + .long 0 +.LinitendS3: + +# +# Subroutine which prints a given text to the SCLP console. +# +# Parameters: +# R2 = address of nil-terminated ASCII text +# +# Returns: +# R2 = 0 on success, 1 on failure +# + +_sclp_print: + stm %r6,%r15,24(%r15) # save registers + ahi %r15,-96 # create stack frame + basr %r13,0 # get base register +.LbaseS4: + l %r8,.LsccbS0-.LbaseS4(%r13) # prepare write data sccb + mvc 0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13) + la %r7,.LmtoS4-.LwritesccbS4(%r8) # current mto addr + sr %r0,%r0 + l %r10,.Lascebc-.LbaseS4(%r13) # address of translation table +.LinitmtoS4: + # initialize mto + mvc 0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13) + lhi %r6,.LmtoendS4-.LmtoS4 # current mto length +.LloopS4: + ic %r0,0(%r2) # get character + ahi %r2,1 + ltr %r0,%r0 # end of string? + jz .LfinalizemtoS4 + chi %r0,0x15 # end of line (NL)? + jz .LfinalizemtoS4 + stc %r0,0(%r6,%r7) # copy to mto + la %r11,0(%r6,%r7) + tr 0(1,%r11),0(%r10) # translate to EBCDIC + ahi %r6,1 + j .LloopS4 +.LfinalizemtoS4: + sth %r6,0(%r7) # update mto length + lh %r9,.LmdbS4-.LwritesccbS4(%r8) # update mdb length + ar %r9,%r6 + sth %r9,.LmdbS4-.LwritesccbS4(%r8) + lh %r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length + ar %r9,%r6 + sth %r9,.LevbufS4-.LwritesccbS4(%r8) + lh %r9,0(%r8) # update sccb length + ar %r9,%r6 + sth %r9,0(%r8) + ar %r7,%r6 # update current mto adress + ltr %r0,%r0 # more characters? + jnz .LinitmtoS4 + l %r2,.LwritedataS4-.LbaseS4(%r13)# write data + lr %r3,%r8 + bras %r14,_sclp_servc + ltr %r2,%r2 # servc successful? + jnz .LendS4 + chi %r3,0x20 # write data successful? + je .LendS4 + lhi %r2,1 # error return code +.LendS4: + lm %r6,%r15,120(%r15) # restore registers + br %r14 + +# +# Function which prints a given text to the SCLP console. 
+# +# Parameters: +# R2 = address of nil-terminated ASCII text +# +# Returns: +# R2 = 0 on success, 1 on failure +# + + .globl _sclp_print_early +_sclp_print_early: + stm %r6,%r15,24(%r15) # save registers + ahi %r15,-96 # create stack frame + lr %r10,%r2 # save string pointer + lhi %r2,0 + bras %r14,_sclp_setup # enable console + ltr %r2,%r2 + jnz .LendS5 + lr %r2,%r10 + bras %r14,_sclp_print # print string + ltr %r2,%r2 + jnz .LendS5 + lhi %r2,1 + bras %r14,_sclp_setup # disable console +.LendS5: + lm %r6,%r15,120(%r15) # restore registers + br %r14 + +.LwritedataS4: + .long 0x00760005 # SCLP command for write data +.LwritesccbS4: + # sccb + .word .LmtoS4-.LwritesccbS4 + .byte 0 + .byte 0,0,0 + .word 0 + + # evbuf +.LevbufS4: + .word .LmtoS4-.LevbufS4 + .byte 0x02 + .byte 0 + .word 0 + +.LmdbS4: + # mdb + .word .LmtoS4-.LmdbS4 + .word 1 + .long 0xd4c4c240 + .long 1 + + # go +.LgoS4: + .word .LmtoS4-.LgoS4 + .word 1 + .long 0 + .byte 0,0,0,0,0,0,0,0 + .byte 0,0,0 + .byte 0 + .byte 0,0,0,0,0,0,0 + .byte 0 + .word 0 + .byte 0,0,0,0,0,0,0,0,0,0 + .byte 0,0,0,0,0,0,0,0 + .byte 0,0,0,0,0,0,0,0 + +.LmtoS4: + .word .LmtoendS4-.LmtoS4 + .word 4 + .word 0x1000 + .byte 0 + .byte 0,0,0 +.LmtoendS4: + + # Global constants +.LsccbS0: + .long _sclp_work_area +.Lascebc: + .long _ascebc +.previous + +.section ".init.data","a" + .balign 4096 +_sclp_work_area: + .fill 4096 +.previous -- cgit v1.2.3 From 7757591ab4a36314a258e181dbf0994415c288c2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:25 +0200 Subject: [S390] implement is_compat_task Implement is_compat_task and use it all over the place. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/compat.h | 19 ++++++++++++++++++- arch/s390/kernel/process.c | 3 ++- arch/s390/kernel/ptrace.c | 12 +++++------- arch/s390/kernel/signal.c | 3 ++- arch/s390/kernel/vdso.c | 4 ++-- arch/s390/mm/fault.c | 3 ++- arch/s390/mm/mmap.c | 11 +++++------ 7 files changed, 36 insertions(+), 19 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index de065b32381a..01a08020bc0e 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -5,6 +5,7 @@ */ #include #include +#include #define PSW32_MASK_PER 0x40000000UL #define PSW32_MASK_DAT 0x04000000UL @@ -163,12 +164,28 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } +#ifdef CONFIG_COMPAT + +static inline int is_compat_task(void) +{ + return test_thread_flag(TIF_31BIT); +} + +#else + +static inline int is_compat_task(void) +{ + return 0; +} + +#endif + static inline void __user *compat_alloc_user_space(long len) { unsigned long stack; stack = KSTK_ESP(current); - if (test_thread_flag(TIF_31BIT)) + if (is_compat_task()) stack &= 0x7fffffffUL; return (void __user *) (stack - len); } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index a3acd8e60aff..355f7a30c3f1 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -204,7 +205,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, save_fp_regs(&p->thread.fp_regs); /* Set a new TLS ? 
*/ if (clone_flags & CLONE_SETTLS) { - if (test_thread_flag(TIF_31BIT)) { + if (is_compat_task()) { p->thread.acrs[0] = (unsigned int) regs->gprs[6]; } else { p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 75c496f4f16d..99eef179e903 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -36,7 +36,7 @@ #include #include #include - +#include #include #include #include @@ -69,7 +69,7 @@ FixPerRegisters(struct task_struct *task) if (per_info->single_step) { per_info->control_regs.bits.starting_addr = 0; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_31BIT)) + if (is_compat_task()) per_info->control_regs.bits.ending_addr = 0x7fffffffUL; else #endif @@ -482,8 +482,7 @@ static int peek_user_compat(struct task_struct *child, { __u32 tmp; - if (!test_thread_flag(TIF_31BIT) || - (addr & 3) || addr > sizeof(struct user) - 3) + if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) return -EIO; tmp = __peek_user_compat(child, addr); @@ -584,8 +583,7 @@ static int __poke_user_compat(struct task_struct *child, static int poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { - if (!test_thread_flag(TIF_31BIT) || - (addr & 3) || addr > sizeof(struct user32) - 3) + if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3) return -EIO; return __poke_user_compat(child, addr, data); @@ -660,7 +658,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) } if (unlikely(current->audit_context)) - audit_syscall_entry(test_thread_flag(TIF_31BIT) ? + audit_syscall_entry(is_compat_task() ? AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 3cf74c3ccb69..062bd64e65fa 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -482,7 +483,7 @@ void do_signal(struct pt_regs *regs) /* Whee! Actually deliver the signal. 
*/ int ret; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_31BIT)) { + if (is_compat_task()) { ret = handle_signal32(signr, &ka, &info, oldset, regs); } else diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 7b76ee4fb16c..45e1708b70fd 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -22,7 +22,7 @@ #include #include #include - +#include #include #include #include @@ -214,7 +214,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_31BIT)) { + if (is_compat_task()) { vdso_pagelist = vdso32_pagelist; vdso_pages = vdso32_pages; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 833e8366c351..220a152c836c 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -239,7 +240,7 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs, up_read(&mm->mmap_sem); clear_tsk_thread_flag(current, TIF_SINGLE_STEP); #ifdef CONFIG_COMPAT - compat = test_tsk_thread_flag(current, TIF_31BIT); + compat = is_compat_task(); if (compat && instruction == 0x0a77) sys32_sigreturn(); else if (compat && instruction == 0x0aad) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index e008d236cc15..f4558ccf02b9 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -28,6 +28,7 @@ #include #include #include +#include /* * Top of mmap area (just below the process stack). @@ -55,7 +56,7 @@ static inline int mmap_is_legacy(void) /* * Force standard allocation for 64 bit programs. */ - if (!test_thread_flag(TIF_31BIT)) + if (!is_compat_task()) return 1; #endif return sysctl_legacy_va_layout || @@ -91,7 +92,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); int s390_mmap_check(unsigned long addr, unsigned long len) { - if (!test_thread_flag(TIF_31BIT) && + if (!is_compat_task() && len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) return crst_table_upgrade(current->mm, 1UL << 53); return 0; @@ -108,8 +109,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); if (!(area & ~PAGE_MASK)) return area; - if (area == -ENOMEM && - !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { /* Upgrade the page table to 4 levels and retry. */ rc = crst_table_upgrade(mm, 1UL << 53); if (rc) @@ -131,8 +131,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); if (!(area & ~PAGE_MASK)) return area; - if (area == -ENOMEM && - !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { /* Upgrade the page table to 4 levels and retry. */ rc = crst_table_upgrade(mm, 1UL << 53); if (rc) -- cgit v1.2.3 From bcf5cef7db869dd3b0ec55ad99641e66b2f5cf02 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:26 +0200 Subject: [S390] secure computing arch backend Enable secure computing on s390 as well. 
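Seen from user space, the new backend enables ordinary strict-mode seccomp; a minimal (hypothetical) test program would be:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_SECCOMP
	#define PR_SET_SECCOMP 22		/* value from linux/prctl.h */
	#endif

	int main(void)
	{
		if (prctl(PR_SET_SECCOMP, 1, 0, 0, 0)) {	/* 1 = strict mode */
			perror("prctl");
			return 1;
		}
		write(1, "sandboxed\n", 10);	/* read/write/exit/sigreturn remain allowed */
		_exit(0);			/* any other syscall would kill the task */
	}
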
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 18 ++++++++++++++++++ arch/s390/include/asm/seccomp.h | 16 ++++++++++++++++ arch/s390/include/asm/thread_info.h | 10 ++++++---- arch/s390/kernel/entry.S | 5 +++-- arch/s390/kernel/entry64.S | 5 +++-- arch/s390/kernel/ptrace.c | 6 +++++- 6 files changed, 51 insertions(+), 9 deletions(-) create mode 100644 arch/s390/include/asm/seccomp.h (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2eca5fe0e75b..1094787e97e5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -567,6 +567,24 @@ bool "s390 guest support for KVM (EXPERIMENTAL)" the KVM hypervisor. This will add detection for KVM as well as a virtio transport. If KVM is detected, the virtio console will be the default console. + +config SECCOMP + bool "Enable seccomp to safely compute untrusted bytecode" + depends on PROC_FS + default y + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via /proc//seccomp, it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. + endmenu source "net/Kconfig" diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h new file mode 100644 index 000000000000..781a9cf9b002 --- /dev/null +++ b/arch/s390/include/asm/seccomp.h @@ -0,0 +1,16 @@ +#ifndef _ASM_S390_SECCOMP_H +#define _ASM_S390_SECCOMP_H + +#include + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_sigreturn + +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#define __NR_seccomp_sigreturn_32 __NR_sigreturn + +#endif /* _ASM_S390_SECCOMP_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 461f2abd2e6f..2f86653dda69 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -83,14 +83,15 @@ static inline struct thread_info *current_thread_info(void) /* * thread information flags bit numbers */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ -#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ +#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ +#define TIF_SECCOMP 10 /* secure computing */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -99,15 +100,16 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ #define TIF_FREEZE 21 /* thread is freezing for suspend */ -#define _TIF_SYSCALL_TRACE (1<>8 | _TIF_SYSCALL_AUDIT>>8 | 
_TIF_SECCOMP>>8) STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT @@ -265,7 +266,7 @@ sysc_do_restart: sth %r7,SP_SVCNR(%r15) sll %r7,2 # svc number *4 l %r8,BASED(.Lsysc_table) - tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+2(%r9),_TIF_SYSCALL l %r8,0(%r7,%r8) # get system call addr. bnz BASED(sysc_tracesys) basr %r14,%r8 # call sys_xxxx @@ -405,7 +406,7 @@ sysc_tracego: basr %r14,%r8 # call sys_xxx st %r2,SP_R2(%r15) # store return value sysc_tracenogo: - tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+2(%r9),_TIF_SYSCALL bz BASED(sysc_return) l %r1,BASED(.Ltrace_exit) la %r2,SP_PTREGS(%r15) # load pt_regs diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 84a105838e03..3cec9b504f5f 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -56,6 +56,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING) +_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) #define BASED(name) name-system_call(%r13) @@ -260,7 +261,7 @@ sysc_do_restart: larl %r10,sys_call_table_emu # use 31 bit emulation system calls sysc_noemu: #endif - tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+6(%r9),_TIF_SYSCALL lgf %r8,0(%r7,%r10) # load address of system call routine jnz sysc_tracesys basr %r14,%r8 # call sys_xxxx @@ -391,7 +392,7 @@ sysc_tracego: basr %r14,%r8 # call sys_xxx stg %r2,SP_R2(%r15) # store return value sysc_tracenogo: - tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+6(%r9),_TIF_SYSCALL jz sysc_return la %r2,SP_PTREGS(%r15) # load pt_regs larl %r14,sysc_return # return point is sysc_return diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 99eef179e903..b6fc1ae2ffcb 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -36,7 +36,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -640,6 +641,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { long ret; + /* Do the secure computing check first. */ + secure_computing(regs->gprs[2]); + /* * The sysc_tracesys code in entry.S stored the system * call number to gprs[2]. -- cgit v1.2.3 From dab4079d5b5ac421208499d5e554a07f9beb16e4 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:32 +0200 Subject: [S390] uaccess: use might_fault() instead of might_sleep() Adds more checking in case lockdep is turned on. 
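For reference, the generic might_fault() of this period is roughly equivalent to the sketch below when CONFIG_PROVE_LOCKING is enabled (and degrades to plain might_sleep() otherwise), which is why the s390 uaccess helpers gain extra lockdep coverage from this change.

	/* Paraphrase of the generic definition, not the verbatim kernel macro. */
	#define might_fault_sketch()					\
		do {							\
			might_sleep();					\
			might_lock_read(&current->mm->mmap_sem);	\
		} while (0)
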
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/uaccess.h | 16 ++++++++-------- arch/s390/kvm/kvm-s390.c | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 0235970278f0..8377e91533d2 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -131,7 +131,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) #define put_user(x, ptr) \ ({ \ - might_sleep(); \ + might_fault(); \ __put_user(x, ptr); \ }) @@ -180,7 +180,7 @@ extern int __put_user_bad(void) __attribute__((noreturn)); #define get_user(x, ptr) \ ({ \ - might_sleep(); \ + might_fault(); \ __get_user(x, ptr); \ }) @@ -231,7 +231,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; @@ -282,7 +282,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); else @@ -299,7 +299,7 @@ __copy_in_user(void __user *to, const void __user *from, unsigned long n) static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) { - might_sleep(); + might_fault(); if (__access_ok(from,n) && __access_ok(to,n)) n = __copy_in_user(to, from, n); return n; @@ -312,7 +312,7 @@ static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; - might_sleep(); + might_fault(); if (access_ok(VERIFY_READ, src, 1)) res = uaccess.strncpy_from_user(count, src, dst); return res; @@ -321,7 +321,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) static inline unsigned long strnlen_user(const char __user * src, unsigned long n) { - might_sleep(); + might_fault(); return uaccess.strnlen_user(n, src); } @@ -354,7 +354,7 @@ __clear_user(void __user *to, unsigned long n) static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_WRITE, to, n)) n = uaccess.clear_user(n, to); return n; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 10bccd1f8aee..c18b21d6991c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -512,7 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) BUG(); } - might_sleep(); + might_fault(); do { __vcpu_run(vcpu); -- cgit v1.2.3 From 239a64255fae8933d95273b5b92545949ca4e743 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:33 +0200 Subject: [S390] vmalloc: add vmalloc kernel parameter support With the kernel parameter 'vmalloc=' the size of the vmalloc area can be specified. This can be used to increase or decrease the size of the area. Works in the same way as on some other architectures. This can be useful for features which make excessive use of vmalloc and wouldn't work otherwise. The default sizes remain unchanged: 96MB for 31 bit kernels and 1GB for 64 bit kernels. 
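A quick stand-alone illustration of the new bookkeeping (constants for the 64 bit layout taken from the patch below, 4 KB pages assumed): the parameter only moves VMALLOC_START downwards while VMALLOC_END stays fixed, so e.g. vmalloc=4G yields a 4 GB area ending at the unchanged VMALLOC_END.

	#include <stdio.h>

	#define SKETCH_VMALLOC_END	0x3e040000000UL	/* 64 bit VMALLOC_END from the patch */
	#define SKETCH_PAGE_MASK	(~0xfffUL)	/* 4 KB pages */

	static unsigned long sketch_vmalloc_start(unsigned long size)
	{
		return (SKETCH_VMALLOC_END - size) & SKETCH_PAGE_MASK;
	}

	int main(void)
	{
		printf("vmalloc=4G -> VMALLOC_START at %#lx\n",
		       sketch_vmalloc_start(4UL << 30));
		return 0;
	}
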
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 7 +++++-- arch/s390/mm/pgtable.c | 16 +++++++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 5caddd4f7bed..60a7b1a1702f 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -112,12 +112,15 @@ extern char empty_zero_page[PAGE_SIZE]; * effect, this also makes sure that 64 bit module code cannot be used * as system call address. */ + +extern unsigned long VMALLOC_START; + #ifndef __s390x__ -#define VMALLOC_START 0x78000000UL +#define VMALLOC_SIZE (96UL << 20) #define VMALLOC_END 0x7e000000UL #define VMEM_MAP_END 0x80000000UL #else /* __s390x__ */ -#define VMALLOC_START 0x3e000000000UL +#define VMALLOC_SIZE (1UL << 30) #define VMALLOC_END 0x3e040000000UL #define VMEM_MAP_END 0x40000000000UL #endif /* __s390x__ */ diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index be6c1cf4ad5a..4ca8e826bf30 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -1,7 +1,5 @@ /* - * arch/s390/mm/pgtable.c - * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007,2009 * Author(s): Martin Schwidefsky */ @@ -53,6 +51,18 @@ void clear_table_pgstes(unsigned long *table) #endif +unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; +EXPORT_SYMBOL(VMALLOC_START); + +static int __init parse_vmalloc(char *arg) +{ + if (!arg) + return -EINVAL; + VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK; + return 0; +} +early_param("vmalloc", parse_vmalloc); + unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) { struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); -- cgit v1.2.3 From f3d0a0d96a7a2b4e1335437f68b3181ab252050a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:40 +0200 Subject: [S390] profile_tick called twice profile_tick is called twice for every clock comparator interrupt. The generic clock event code does it in tick_sched_timer and the s390 backend code in clock_comparator_work. That is one too many, remove the one in the arch backend code. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/time.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ef596d020573..ad9a999aaa92 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -95,12 +95,6 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) xtime->tv_nsec = ((todval * 1000) >> 12); } -#ifdef CONFIG_PROFILING -#define s390_do_profile() profile_tick(CPU_PROFILING) -#else -#define s390_do_profile() do { ; } while(0) -#endif /* CONFIG_PROFILING */ - void clock_comparator_work(void) { struct clock_event_device *cd; @@ -109,7 +103,6 @@ void clock_comparator_work(void) set_clock_comparator(S390_lowcore.clock_comparator); cd = &__get_cpu_var(comparators); cd->event_handler(cd); - s390_do_profile(); } /* -- cgit v1.2.3 From 88df125fd6127e409793fd84a574566651450320 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:42 +0200 Subject: [S390] maccess: arch specific probe_kernel_write() implementation Add an s390 specific probe_kernel_write() function which allows to write to the kernel text segment even if write protection is enabled. This is implemented using the lra (load real address) and stura (store using real address) instructions. 
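With this helper in place, patching a write-protected text location from C becomes a one-liner, as the kprobes conversion later in this series shows. A minimal usage sketch (hypothetical wrapper name):

	#include <stddef.h>

	extern long probe_kernel_write(void *dst, void *src, size_t size);

	static int patch_halfword_sketch(unsigned short *location, unsigned short new_opcode)
	{
		/* 0 on success, -EFAULT if the destination cannot be written */
		return probe_kernel_write(location, &new_opcode, sizeof(new_opcode));
	}
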
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/mm/Makefile | 2 +- arch/s390/mm/maccess.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 arch/s390/mm/maccess.c (limited to 'arch/s390') diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index 2a7458134544..db05661ac895 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile @@ -2,7 +2,7 @@ # Makefile for the linux s390-specific parts of the memory manager. # -obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o +obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PAGE_STATES) += page-states.o diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c new file mode 100644 index 000000000000..81756271dc44 --- /dev/null +++ b/arch/s390/mm/maccess.c @@ -0,0 +1,61 @@ +/* + * Access kernel memory without faulting -- s390 specific implementation. + * + * Copyright IBM Corp. 2009 + * + * Author(s): Heiko Carstens , + * + */ + +#include +#include +#include +#include +#include + +/* + * This function writes to kernel memory bypassing DAT and possible + * write protection. It copies one to four bytes from src to dst + * using the stura instruction. + * Returns the number of bytes copied or -EFAULT. + */ +static long probe_kernel_write_odd(void *dst, void *src, size_t size) +{ + unsigned long count, aligned; + int offset, mask; + int rc = -EFAULT; + + aligned = (unsigned long) dst & ~3UL; + offset = (unsigned long) dst & 3; + count = min_t(unsigned long, 4 - offset, size); + mask = (0xf << (4 - count)) & 0xf; + mask >>= offset; + asm volatile( + " bras 1,0f\n" + " icm 0,0,0(%3)\n" + "0: l 0,0(%1)\n" + " lra %1,0(%1)\n" + "1: ex %2,0(1)\n" + "2: stura 0,%1\n" + " la %0,0\n" + "3:\n" + EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b) + : "+d" (rc), "+a" (aligned) + : "a" (mask), "a" (src) : "cc", "memory", "0", "1"); + return rc ? rc : count; +} + +long probe_kernel_write(void *dst, void *src, size_t size) +{ + long copied = 0; + + while (size) { + copied = probe_kernel_write_odd(dst, src, size); + if (copied < 0) + break; + dst += copied; + src += copied; + size -= copied; + } + return copied < 0 ? -EFAULT : 0; +} -- cgit v1.2.3 From a2b53673fae14601bb520acf6a6f154463994565 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:43 +0200 Subject: [S390] kprobes: use probe_kernel_write Use proble_kernel_write() to patch the kernel. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/kprobes.c | 31 ++----------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index a01cf0284db2..9bb2f6241d9f 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -25,9 +25,9 @@ #include #include #include +#include #include #include -#include #include DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; @@ -155,35 +155,8 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) static int __kprobes swap_instruction(void *aref) { struct ins_replace_args *args = aref; - u32 *addr; - u32 instr; - int err = -EFAULT; - /* - * Text segment is read-only, hence we use stura to bypass dynamic - * address translation to exchange the instruction. 
Since stura - * always operates on four bytes, but we only want to exchange two - * bytes do some calculations to get things right. In addition we - * shall not cross any page boundaries (vmalloc area!) when writing - * the new instruction. - */ - addr = (u32 *)((unsigned long)args->ptr & -4UL); - if ((unsigned long)args->ptr & 2) - instr = ((*addr) & 0xffff0000) | args->new; - else - instr = ((*addr) & 0x0000ffff) | args->new << 16; - - asm volatile( - " lra %1,0(%1)\n" - "0: stura %2,%1\n" - "1: la %0,0\n" - "2:\n" - EX_TABLE(0b,2b) - : "+d" (err) - : "a" (addr), "d" (instr) - : "memory", "cc"); - - return err; + return probe_kernel_write(args->ptr, &args->new, sizeof(args->new)); } void __kprobes arch_arm_kprobe(struct kprobe *p) -- cgit v1.2.3 From dfd9f7abc0fb67b5781f340d982384cea53b2884 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:44 +0200 Subject: [S390] ftrace: add dynamic ftrace support Dynamic ftrace support for s390. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 2 + arch/s390/include/asm/ftrace.h | 19 ++++++ arch/s390/include/asm/lowcore.h | 8 ++- arch/s390/kernel/Makefile | 5 ++ arch/s390/kernel/early.c | 4 ++ arch/s390/kernel/ftrace.c | 132 ++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/mcount.S | 119 ++++++++++++++++++++++++++++-------- arch/s390/kernel/setup.c | 2 + arch/s390/kernel/smp.c | 1 + scripts/recordmcount.pl | 13 ++++ 10 files changed, 276 insertions(+), 29 deletions(-) create mode 100644 arch/s390/kernel/ftrace.c (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1094787e97e5..b674e79044a0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -82,6 +82,8 @@ config S390 select USE_GENERIC_SMP_HELPERS if SMP select HAVE_SYSCALL_WRAPPERS select HAVE_FUNCTION_TRACER + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_DYNAMIC_FTRACE select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 5a5bc75e19d4..ba23d8f97d07 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -2,7 +2,26 @@ #define _ASM_S390_FTRACE_H #ifndef __ASSEMBLY__ + extern void _mcount(void); +extern unsigned long ftrace_dyn_func; + +struct dyn_arch_ftrace { }; + +#define MCOUNT_ADDR ((long)_mcount) + +#ifdef CONFIG_64BIT +#define MCOUNT_INSN_SIZE 24 +#define MCOUNT_OFFSET 14 +#else +#define MCOUNT_INSN_SIZE 30 +#define MCOUNT_OFFSET 8 #endif +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr - MCOUNT_OFFSET; +} + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 713fe9fb1fcc..5046ad6b7a63 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -68,6 +68,7 @@ #define __LC_CPUID 0x02b0 #define __LC_INT_CLOCK 0x02c8 #define __LC_MACHINE_FLAGS 0x02d8 +#define __LC_FTRACE_FUNC 0x02dc #define __LC_IRB 0x0300 #define __LC_PFAULT_INTPARM 0x0080 #define __LC_CPU_TIMER_SAVE_AREA 0x00d8 @@ -113,6 +114,7 @@ #define __LC_INT_CLOCK 0x0340 #define __LC_VDSO_PER_CPU 0x0350 #define __LC_MACHINE_FLAGS 0x0358 +#define __LC_FTRACE_FUNC 0x0360 #define __LC_IRB 0x0380 #define __LC_PASTE 0x03c0 #define __LC_PFAULT_INTPARM 0x11b8 @@ -281,7 +283,8 @@ struct _lowcore __u64 int_clock; /* 0x02c8 */ __u64 clock_comparator; /* 0x02d0 */ __u32 machine_flags; /* 0x02d8 */ - __u8 pad_0x02dc[0x0300-0x02dc]; /* 0x02dc */ + __u32 
ftrace_func; /* 0x02dc */ + __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ @@ -386,7 +389,8 @@ struct _lowcore __u64 clock_comparator; /* 0x0348 */ __u64 vdso_per_cpu_data; /* 0x0350 */ __u64 machine_flags; /* 0x0358 */ - __u8 pad_0x0360[0x0380-0x0360]; /* 0x0360 */ + __u64 ftrace_func; /* 0x0360 */ + __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */ /* Interrupt response block. */ __u8 irb[64]; /* 0x0380 */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 0657de7944fe..ce172bfaab8a 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_early.o = -pg endif +ifdef CONFIG_DYNAMIC_FTRACE +CFLAGS_REMOVE_ftrace.o = -pg +endif + # # Passing null pointers is ok for smp code, since we access the lowcore here. # @@ -41,6 +45,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cf09948faad6..fb263736826c 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -410,5 +411,8 @@ void __init startup_init(void) sclp_facilities_detect(); detect_memory_layout(memory_chunk); S390_lowcore.machine_flags = machine_flags; +#ifdef CONFIG_DYNAMIC_FTRACE + S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; +#endif lockdep_on(); } diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c new file mode 100644 index 000000000000..0b81a784e039 --- /dev/null +++ b/arch/s390/kernel/ftrace.c @@ -0,0 +1,132 @@ +/* + * Dynamic function tracer architecture backend. + * + * Copyright IBM Corp. 2009 + * + * Author(s): Heiko Carstens , + * + */ + +#include +#include +#include +#include +#include + +void ftrace_disable_code(void); +void ftrace_call_code(void); +void ftrace_nop_code(void); + +#define FTRACE_INSN_SIZE 4 + +#ifdef CONFIG_64BIT + +asm( + " .align 4\n" + "ftrace_disable_code:\n" + " j 0f\n" + " .word 0x0024\n" + " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" + " basr %r14,%r1\n" + " lg %r14,8(15)\n" + " lgr %r0,%r0\n" + "0:\n"); + +asm( + " .align 4\n" + "ftrace_nop_code:\n" + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); + +asm( + " .align 4\n" + "ftrace_call_code:\n" + " stg %r14,8(%r15)\n"); + +#else /* CONFIG_64BIT */ + +asm( + " .align 4\n" + "ftrace_disable_code:\n" + " j 0f\n" + " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" + " basr %r14,%r1\n" + " l %r14,4(%r15)\n" + " j 0f\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + "0:\n"); + +asm( + " .align 4\n" + "ftrace_nop_code:\n" + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); + +asm( + " .align 4\n" + "ftrace_call_code:\n" + " st %r14,4(%r15)\n"); + +#endif /* CONFIG_64BIT */ + +static int ftrace_modify_code(unsigned long ip, + void *old_code, int old_size, + void *new_code, int new_size) +{ + unsigned char replaced[MCOUNT_INSN_SIZE]; + + /* + * Note: Due to modules code can disappear and change. + * We need to protect against faulting as well as code + * changing. We do this by using the probe_kernel_* + * functions. + * This however is just a simple sanity check. 
+ */ + if (probe_kernel_read(replaced, (void *)ip, old_size)) + return -EFAULT; + if (memcmp(replaced, old_code, old_size) != 0) + return -EINVAL; + if (probe_kernel_write((void *)ip, new_code, new_size)) + return -EPERM; + return 0; +} + +static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, + ftrace_call_code, FTRACE_INSN_SIZE, + ftrace_disable_code, MCOUNT_INSN_SIZE); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + if (addr == MCOUNT_ADDR) + return ftrace_make_initial_nop(mod, rec, addr); + return ftrace_modify_code(rec->ip, + ftrace_call_code, FTRACE_INSN_SIZE, + ftrace_nop_code, FTRACE_INSN_SIZE); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + return ftrace_modify_code(rec->ip, + ftrace_nop_code, FTRACE_INSN_SIZE, + ftrace_call_code, FTRACE_INSN_SIZE); +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + ftrace_dyn_func = (unsigned long)func; + return 0; +} + +int __init ftrace_dyn_arch_init(void *data) +{ + *(unsigned long *)data = 0; + return 0; +} diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 80641224a095..de274996f640 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 2008 + * Copyright IBM Corp. 2008,2009 * * Author(s): Heiko Carstens , * @@ -7,36 +7,46 @@ #include -#ifndef CONFIG_64BIT -.globl _mcount + .globl ftrace_stub +ftrace_stub: + br %r14 + +#ifdef CONFIG_64BIT + +#ifdef CONFIG_DYNAMIC_FTRACE + + .globl _mcount _mcount: - stm %r0,%r5,8(%r15) - st %r14,56(%r15) - lr %r1,%r15 - ahi %r15,-96 - l %r3,100(%r15) - la %r2,0(%r14) - st %r1,__SF_BACKCHAIN(%r15) - la %r3,0(%r3) - bras %r14,0f - .long ftrace_trace_function -0: l %r14,0(%r14) - l %r14,0(%r14) - basr %r14,%r14 - ahi %r15,96 - lm %r0,%r5,8(%r15) - l %r14,56(%r15) br %r14 -.globl ftrace_stub -ftrace_stub: + .globl ftrace_caller +ftrace_caller: + stmg %r2,%r5,32(%r15) + stg %r14,112(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,__SF_BACKCHAIN(%r15) + lgr %r2,%r14 + lg %r3,168(%r15) + larl %r14,ftrace_dyn_func + lg %r14,0(%r14) + basr %r14,%r14 + aghi %r15,160 + lmg %r2,%r5,32(%r15) + lg %r14,112(%r15) br %r14 -#else /* CONFIG_64BIT */ + .data + .globl ftrace_dyn_func +ftrace_dyn_func: + .quad ftrace_stub + .previous + +#else /* CONFIG_DYNAMIC_FTRACE */ -.globl _mcount + .globl _mcount _mcount: - stmg %r0,%r5,16(%r15) + stmg %r2,%r5,32(%r15) stg %r14,112(%r15) lgr %r1,%r15 aghi %r15,-160 @@ -47,12 +57,67 @@ _mcount: lg %r14,0(%r14) basr %r14,%r14 aghi %r15,160 - lmg %r0,%r5,16(%r15) + lmg %r2,%r5,32(%r15) lg %r14,112(%r15) br %r14 -.globl ftrace_stub -ftrace_stub: +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#else /* CONFIG_64BIT */ + +#ifdef CONFIG_DYNAMIC_FTRACE + + .globl _mcount +_mcount: + br %r14 + + .globl ftrace_caller +ftrace_caller: + stm %r2,%r5,16(%r15) + st %r14,56(%r15) + lr %r1,%r15 + ahi %r15,-96 + l %r3,100(%r15) + la %r2,0(%r14) + st %r1,__SF_BACKCHAIN(%r15) + la %r3,0(%r3) + bras %r14,0f + .long ftrace_dyn_func +0: l %r14,0(%r14) + l %r14,0(%r14) + basr %r14,%r14 + ahi %r15,96 + lm %r2,%r5,16(%r15) + l %r14,56(%r15) + br %r14 + + .data + .globl ftrace_dyn_func +ftrace_dyn_func: + .long ftrace_stub + .previous + +#else /* CONFIG_DYNAMIC_FTRACE */ + + .globl _mcount +_mcount: + stm %r2,%r5,16(%r15) + st %r14,56(%r15) + lr %r1,%r15 + ahi %r15,-96 + l %r3,100(%r15) + la %r2,0(%r14) + st %r1,__SF_BACKCHAIN(%r15) + la %r3,0(%r3) + bras %r14,0f + .long 
ftrace_trace_function +0: l %r14,0(%r14) + l %r14,0(%r14) + basr %r14,%r14 + ahi %r15,96 + lm %r2,%r5,16(%r15) + l %r14,56(%r15) br %r14 +#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7402b6a39ead..9717717c6fea 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -442,6 +443,7 @@ setup_lowcore(void) lc->steal_timer = S390_lowcore.steal_timer; lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_clock = S390_lowcore.last_update_clock; + lc->ftrace_func = S390_lowcore.ftrace_func; set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 0af302ceac2d..cc8c484984e3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -572,6 +572,7 @@ int __cpuinit __cpu_up(unsigned int cpu) cpu_lowcore->cpu_nr = cpu; cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; cpu_lowcore->machine_flags = S390_lowcore.machine_flags; + cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; eieio(); while (signal_processor(cpu, sigp_restart) == sigp_busy) diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 0fae7da0529c..91033e67321e 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -185,6 +185,19 @@ if ($arch eq "x86_64") { $objcopy .= " -O elf32-i386"; $cc .= " -m32"; +} elsif ($arch eq "s390" && $bits == 32) { + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; + $alignment = 4; + $ld .= " -m elf_s390"; + $cc .= " -m31"; + +} elsif ($arch eq "s390" && $bits == 64) { + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; + $alignment = 8; + $type = ".quad"; + $ld .= " -m elf64_s390"; + $cc .= " -m64"; + } elsif ($arch eq "sh") { $alignment = 2; -- cgit v1.2.3 From 8b4488f85d619253c9e631ec723368f400106771 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:45 +0200 Subject: [S390] ftrace: add function trace mcount test support Add support for early test if the function tracer is enabled or disabled. Saves some extra function calls. 
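The assembler change amounts to the following C logic executed at the very top of every mcount/ftrace_caller stub (simplified sketch; signatures as used by the ftrace core of this period):

	extern int function_trace_stop;		/* maintained by the ftrace core */
	extern void (*ftrace_trace_function)(unsigned long ip, unsigned long parent_ip);

	static void mcount_sketch(unsigned long ip, unsigned long parent_ip)
	{
		if (function_trace_stop)	/* tracing stopped: cheap early return,   */
			return;			/* no stack frame, no indirect call       */
		ftrace_trace_function(ip, parent_ip);
	}
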
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 + arch/s390/kernel/mcount.S | 38 ++++++++++++++++++++++++++------------ 2 files changed, 27 insertions(+), 12 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b674e79044a0..480590f21570 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -82,6 +82,7 @@ config S390 select USE_GENERIC_SMP_HELPERS if SMP select HAVE_SYSCALL_WRAPPERS select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_DEFAULT_NO_SPIN_MUTEXES diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index de274996f640..0aa85ec94d08 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -21,6 +21,9 @@ _mcount: .globl ftrace_caller ftrace_caller: + larl %r1,function_trace_stop + icm %r1,0xf,0(%r1) + bnzr %r14 stmg %r2,%r5,32(%r15) stg %r14,112(%r15) lgr %r1,%r15 @@ -46,6 +49,9 @@ ftrace_dyn_func: .globl _mcount _mcount: + larl %r1,function_trace_stop + icm %r1,0xf,0(%r1) + bnzr %r14 stmg %r2,%r5,32(%r15) stg %r14,112(%r15) lgr %r1,%r15 @@ -74,21 +80,25 @@ _mcount: .globl ftrace_caller ftrace_caller: stm %r2,%r5,16(%r15) + bras %r1,2f +0: .long ftrace_trace_function +1: .long function_trace_stop +2: l %r2,1b-0b(%r1) + icm %r2,0xf,0(%r2) + jnz 3f st %r14,56(%r15) - lr %r1,%r15 + lr %r0,%r15 ahi %r15,-96 l %r3,100(%r15) la %r2,0(%r14) - st %r1,__SF_BACKCHAIN(%r15) + st %r0,__SF_BACKCHAIN(%r15) la %r3,0(%r3) - bras %r14,0f - .long ftrace_dyn_func -0: l %r14,0(%r14) + l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 ahi %r15,96 - lm %r2,%r5,16(%r15) l %r14,56(%r15) +3: lm %r2,%r5,16(%r15) br %r14 .data @@ -102,21 +112,25 @@ ftrace_dyn_func: .globl _mcount _mcount: stm %r2,%r5,16(%r15) + bras %r1,2f +0: .long ftrace_trace_function +1: .long function_trace_stop +2: l %r2,1b-0b(%r1) + icm %r2,0xf,0(%r2) + jnz 3f st %r14,56(%r15) - lr %r1,%r15 + lr %r0,%r15 ahi %r15,-96 l %r3,100(%r15) la %r2,0(%r14) - st %r1,__SF_BACKCHAIN(%r15) + st %r0,__SF_BACKCHAIN(%r15) la %r3,0(%r3) - bras %r14,0f - .long ftrace_trace_function -0: l %r14,0(%r14) + l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 ahi %r15,96 - lm %r2,%r5,16(%r15) l %r14,56(%r15) +3: lm %r2,%r5,16(%r15) br %r14 #endif /* CONFIG_DYNAMIC_FTRACE */ -- cgit v1.2.3 From 88dbd2037229bd2ed7543ffd0d8f2d9dec9d31d2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:46 +0200 Subject: [S390] ftrace: add function graph tracer support Function graph tracer support for s390. 
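The arch hook added below redirects the traced function's saved return address so that its exit passes through return_to_handler as well. A simplified C sketch (stand-in helper name; the NMI and pause checks of the real prepare_ftrace_return() are omitted):

	extern void return_to_handler(void);
	/* stand-in for the generic return-stack bookkeeping of the tracing core */
	extern int push_return_trace(unsigned long ret_addr, unsigned long func);

	static void hook_return_address(unsigned long func, unsigned long *ret_slot)
	{
		if (push_return_trace(*ret_slot, func) == 0)
			/* the traced function will now "return" into return_to_handler */
			*ret_slot = (unsigned long)return_to_handler;
	}
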
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 + arch/s390/include/asm/ftrace.h | 10 +++--- arch/s390/kernel/Makefile | 6 ++-- arch/s390/kernel/ftrace.c | 72 ++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/mcount.S | 79 ++++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/s390_ext.c | 3 +- arch/s390/kernel/time.c | 2 +- arch/s390/kernel/vmlinux.lds.S | 1 + drivers/s390/cio/cio.c | 4 +-- 9 files changed, 166 insertions(+), 12 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 480590f21570..9023cc900bda 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -85,6 +85,7 @@ config S390 select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE + select HAVE_FUNCTION_GRAPH_TRACER select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index ba23d8f97d07..96c14a9102b8 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -11,11 +11,13 @@ struct dyn_arch_ftrace { }; #define MCOUNT_ADDR ((long)_mcount) #ifdef CONFIG_64BIT -#define MCOUNT_INSN_SIZE 24 -#define MCOUNT_OFFSET 14 +#define MCOUNT_OFFSET_RET 18 +#define MCOUNT_INSN_SIZE 24 +#define MCOUNT_OFFSET 14 #else -#define MCOUNT_INSN_SIZE 30 -#define MCOUNT_OFFSET 8 +#define MCOUNT_OFFSET_RET 26 +#define MCOUNT_INSN_SIZE 30 +#define MCOUNT_OFFSET 8 #endif static inline unsigned long ftrace_call_adjust(unsigned long addr) diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index ce172bfaab8a..c75ed43b1a18 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -3,11 +3,8 @@ # ifdef CONFIG_FUNCTION_TRACER -# Do not trace early boot code +# Don't trace early setup code and tracing code CFLAGS_REMOVE_early.o = -pg -endif - -ifdef CONFIG_DYNAMIC_FTRACE CFLAGS_REMOVE_ftrace.o = -pg endif @@ -46,6 +43,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 0b81a784e039..c92a10953279 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -7,13 +7,17 @@ * */ +#include #include #include #include #include #include +#ifdef CONFIG_DYNAMIC_FTRACE + void ftrace_disable_code(void); +void ftrace_disable_return(void); void ftrace_call_code(void); void ftrace_nop_code(void); @@ -28,6 +32,7 @@ asm( " .word 0x0024\n" " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " basr %r14,%r1\n" + "ftrace_disable_return:\n" " lg %r14,8(15)\n" " lgr %r0,%r0\n" "0:\n"); @@ -50,6 +55,7 @@ asm( " j 0f\n" " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " basr %r14,%r1\n" + "ftrace_disable_return:\n" " l %r14,4(%r15)\n" " j 0f\n" " bcr 0,%r7\n" @@ -130,3 +136,69 @@ int __init ftrace_dyn_arch_init(void *data) *(unsigned long *)data = 0; return 0; } + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE +/* + * Patch the kernel code at ftrace_graph_caller location: + * The instruction there is branch relative on condition. The condition mask + * is either all ones (always branch aka disable ftrace_graph_caller) or all + * zeroes (nop aka enable ftrace_graph_caller). + * Instruction format for brc is a7m4xxxx where m is the condition mask. 
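+ * For example, following that encoding: brc with mask 0 assembles to
+ * 0xa704xxxx (branch never taken, ftrace_graph_caller enabled), while brc
+ * with mask 15 assembles to 0xa7f4xxxx (branch always taken, disabled),
+ * matching the opcodes written by the two functions below.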
+ */ +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned short opcode = 0xa704; + + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned short opcode = 0xa7f4; + + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); +} + +static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) +{ + return addr - (ftrace_disable_return - ftrace_disable_code); +} + +#else /* CONFIG_DYNAMIC_FTRACE */ + +static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) +{ + return addr - MCOUNT_OFFSET_RET; +} + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* + * Hook the return address and push it in the stack of return addresses + * in current thread info. + */ +unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) +{ + struct ftrace_graph_ent trace; + + /* Nmi's are currently unsupported. */ + if (unlikely(in_nmi())) + goto out; + if (unlikely(atomic_read(&current->tracing_graph_pause))) + goto out; + if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY) + goto out; + trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; + /* Only trace if the calling function expects to. */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + goto out; + } + parent = (unsigned long)return_to_handler; +out: + return parent; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 0aa85ec94d08..2a0a5e97ba8c 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -34,6 +34,18 @@ ftrace_caller: larl %r14,ftrace_dyn_func lg %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_caller +ftrace_graph_caller: + # This unconditional branch gets runtime patched. Change only if + # you know what you are doing. See ftrace_enable_graph_caller(). + j 0f + lg %r2,272(%r15) + lg %r3,168(%r15) + brasl %r14,prepare_ftrace_return + stg %r2,168(%r15) +0: +#endif aghi %r15,160 lmg %r2,%r5,32(%r15) lg %r14,112(%r15) @@ -62,6 +74,12 @@ _mcount: larl %r14,ftrace_trace_function lg %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + lg %r2,272(%r15) + lg %r3,168(%r15) + brasl %r14,prepare_ftrace_return + stg %r2,168(%r15) +#endif aghi %r15,160 lmg %r2,%r5,32(%r15) lg %r14,112(%r15) @@ -69,6 +87,22 @@ _mcount: #endif /* CONFIG_DYNAMIC_FTRACE */ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + + .globl return_to_handler +return_to_handler: + stmg %r2,%r5,32(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,__SF_BACKCHAIN(%r15) + brasl %r14,ftrace_return_to_handler + aghi %r15,160 + lgr %r14,%r2 + lmg %r2,%r5,32(%r15) + br %r14 + +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + #else /* CONFIG_64BIT */ #ifdef CONFIG_DYNAMIC_FTRACE @@ -96,6 +130,21 @@ ftrace_caller: l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_caller +ftrace_graph_caller: + # This unconditional branch gets runtime patched. Change only if + # you know what you are doing. See ftrace_enable_graph_caller(). 
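+	# Illustrative note: the following "j 1f" is brc 15,1f (0xa7f4....);
+	# ftrace_enable_ftrace_graph_caller() patches the mask to 0 (0xa704....)
+	# so that execution falls through into the graph tracing code below.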
+ j 1f + bras %r1,0f + .long prepare_ftrace_return +0: l %r2,152(%r15) + l %r4,0(%r1) + l %r3,100(%r15) + basr %r14,%r4 + st %r2,100(%r15) +1: +#endif ahi %r15,96 l %r14,56(%r15) 3: lm %r2,%r5,16(%r15) @@ -128,10 +177,40 @@ _mcount: l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bras %r1,0f + .long prepare_ftrace_return +0: l %r2,152(%r15) + l %r4,0(%r1) + l %r3,100(%r15) + basr %r14,%r4 + st %r2,100(%r15) +#endif ahi %r15,96 l %r14,56(%r15) 3: lm %r2,%r5,16(%r15) br %r14 #endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + + .globl return_to_handler +return_to_handler: + stm %r2,%r5,16(%r15) + st %r14,56(%r15) + lr %r0,%r15 + ahi %r15,-96 + st %r0,__SF_BACKCHAIN(%r15) + bras %r1,0f + .long ftrace_return_to_handler +0: l %r2,0b-0b(%r1) + basr %r14,%r2 + lr %r14,%r2 + ahi %r15,96 + lm %r2,%r5,16(%r15) + br %r14 + +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + #endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index 6b0686d78fc7..0de305b598ce 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -112,7 +113,7 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler, return 0; } -void do_extint(struct pt_regs *regs, unsigned short code) +void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) { ext_int_info_t *p; int index; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ad9a999aaa92..215330a2c128 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -70,7 +70,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); /* * Scheduler clock - returns current time in nanosec units. */ -unsigned long long sched_clock(void) +unsigned long long notrace sched_clock(void) { return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 89399b8756c2..a53db23ee092 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -34,6 +34,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT *(.fixup) *(.gnu.warning) } :text = 0x0700 diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9889f188c7c5..5ec7789bd9d8 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -12,6 +12,7 @@ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include #include #include @@ -626,8 +627,7 @@ out: * handlers). * */ -void -do_IRQ (struct pt_regs *regs) +void __irq_entry do_IRQ(struct pt_regs *regs) { struct tpi_info *tpi_info; struct subchannel *sch; -- cgit v1.2.3 From 9bf1226b33dc2f94fc37ffd70ee161e2bda1ff5c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:47 +0200 Subject: [S390] ftrace: add system call tracer support System call tracer support for s390. 
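The arch-specific piece below maps sys_call_table entries to the syscall metadata recorded by the tracing core. A simplified sketch of the comparison done in find_syscall_meta() (hypothetical helper name; the three skipped characters are presumably there so that "sys_foo" metadata still matches a wrapped "SyS_foo" symbol as reported by kallsyms):

	#include <string.h>

	static int syscall_name_match(const char *meta_name, const char *sym_name)
	{
		/* compare "_foo" with "_foo", ignoring the sys/SyS prefix case */
		return strcmp(meta_name + 3, sym_name + 3) == 0;
	}
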
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 + arch/s390/include/asm/syscall.h | 1 + arch/s390/include/asm/thread_info.h | 2 ++ arch/s390/kernel/entry.S | 4 ++- arch/s390/kernel/entry64.S | 4 ++- arch/s390/kernel/ftrace.c | 56 +++++++++++++++++++++++++++++++++++++ arch/s390/kernel/ptrace.c | 7 +++++ 7 files changed, 73 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9023cc900bda..99dc3ded6b49 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -84,6 +84,7 @@ config S390 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FTRACE_SYSCALLS select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_GRAPH_TRACER select HAVE_DEFAULT_NO_SPIN_MUTEXES diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 2429b87eb28d..e0a73d3eb837 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -12,6 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 +#include #include static inline long syscall_get_nr(struct task_struct *task, diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 2f86653dda69..925bcc649035 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ +#define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -110,6 +111,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_TRACE (1<>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) +_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ + _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT @@ -1108,6 +1109,7 @@ cleanup_io_leave_insn: .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esa + .globl sys_call_table sys_call_table: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 3cec9b504f5f..f6618e9e15ef 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -56,7 +56,8 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING) -_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) +_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ + _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) #define BASED(name) name-system_call(%r13) @@ -1059,6 +1060,7 @@ cleanup_io_leave_insn: .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esame + .globl sys_call_table sys_call_table: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index c92a10953279..82ddfd3a75af 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #ifdef CONFIG_DYNAMIC_FTRACE @@ -202,3 +203,58 @@ out: return parent; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + 
+#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long __stop_syscalls_metadata[]; +extern unsigned int sys_call_table[]; + +static struct syscall_metadata **syscalls_metadata; + +struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +static struct syscall_metadata *find_syscall_meta(unsigned long syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup(syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + if (start->name && !strcmp(start->name + 3, str + 3)) + return start; + } + return NULL; +} + +void arch_init_ftrace_syscalls(void) +{ + struct syscall_metadata *meta; + int i; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto out; + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, + GFP_KERNEL); + if (!syscalls_metadata) + goto out; + for (i = 0; i < NR_syscalls; i++) { + meta = find_syscall_meta((unsigned long)sys_call_table[i]); + syscalls_metadata[i] = meta; + } + return; +out: + atomic_dec(&refs); +} +#endif diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index b6fc1ae2ffcb..490b39934d65 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -661,6 +662,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) ret = -1; } + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_enter(regs); + if (unlikely(current->audit_context)) audit_syscall_entry(is_compat_task() ? 
AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, @@ -676,6 +680,9 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]); + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); } -- cgit v1.2.3 From fc39453debac927a3ba477bafbc6c18c7166ae78 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:48 +0200 Subject: [S390] wire up sys_rt_tgsigqueueinfo Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 3 ++- arch/s390/kernel/compat_wrapper.S | 8 ++++++++ arch/s390/kernel/syscalls.S | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index f0f19e6ace6c..90557237172b 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -267,7 +267,8 @@ #define __NR_epoll_create1 327 #define __NR_preadv 328 #define __NR_pwritev 329 -#define NR_syscalls 330 +#define __NR_rt_tgsigqueueinfo 330 +#define NR_syscalls 331 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index fb38af6316bb..d2e7fbf5d65d 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1823,3 +1823,11 @@ compat_sys_pwritev_wrapper: llgfr %r5,%r5 # u32 llgfr %r6,%r6 # u32 jg compat_sys_pwritev # branch to system call + + .globl compat_sys_rt_tgsigqueueinfo_wrapper +compat_sys_rt_tgsigqueueinfo_wrapper: + lgfr %r2,%r2 # compat_pid_t + lgfr %r3,%r3 # compat_pid_t + lgfr %r4,%r4 # int + llgtr %r5,%r5 # struct compat_siginfo * + jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 2c7739fe70b1..82f89ef87d2e 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -338,3 +338,4 @@ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper) SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) +SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ -- cgit v1.2.3 From 310d6b671588dd7695cbc0d09d02e41d94a42bed Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:49 +0200 Subject: [S390] wire up sys_perf_counter_open Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 3 ++- arch/s390/kernel/compat_wrapper.S | 9 +++++++++ arch/s390/kernel/syscalls.S | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 90557237172b..c80602d7c880 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -268,7 +268,8 @@ #define __NR_preadv 328 #define __NR_pwritev 329 #define __NR_rt_tgsigqueueinfo 330 -#define NR_syscalls 331 +#define __NR_perf_counter_open 331 +#define NR_syscalls 332 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index d2e7fbf5d65d..88a83366819f 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1831,3 +1831,12 @@ 
compat_sys_rt_tgsigqueueinfo_wrapper: lgfr %r4,%r4 # int llgtr %r5,%r5 # struct compat_siginfo * jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call + + .globl sys_perf_counter_open_wrapper +sys_perf_counter_open_wrapper: + llgtr %r2,%r2 # const struct perf_counter_attr * + lgfr %r3,%r3 # pid_t + lgfr %r4,%r4 # int + lgfr %r5,%r5 # int + llgfr %r6,%r6 # unsigned long + jg sys_perf_counter_open # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 82f89ef87d2e..ad1acd200385 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -339,3 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ +SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper) -- cgit v1.2.3
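The compat wrappers added by the last two patches all follow the same argument-conversion pattern; as a rough C model (illustrative assumption, not part of the patches), the three instructions used above correspond to:

	/* lgfr: sign-extend a 32-bit signed argument (int, pid_t, ...) */
	static inline unsigned long compat_arg_s32(unsigned int r)
	{
		return (unsigned long)(long)(int)r;
	}

	/* llgfr: zero-extend a 32-bit unsigned argument */
	static inline unsigned long compat_arg_u32(unsigned int r)
	{
		return (unsigned long)r;
	}

	/* llgtr: keep only the low 31 bits of a compat user-space pointer */
	static inline unsigned long compat_arg_ptr(unsigned int r)
	{
		return (unsigned long)(r & 0x7fffffffU);
	}

The 64-bit sys_rt_tgsigqueueinfo and sys_perf_counter_open implementations then receive properly extended values in %r2-%r6 before the jg branch transfers control to them.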