Diffstat (limited to 'arch/sparc')
| -rw-r--r-- | arch/sparc/Kconfig                    |  1 |
| -rw-r--r-- | arch/sparc/include/asm/processor_32.h |  2 |
| -rw-r--r-- | arch/sparc/include/asm/processor_64.h |  1 |
| -rw-r--r-- | arch/sparc/lib/mcount.S               | 10 |
| -rw-r--r-- | arch/sparc/net/bpf_jit_comp.c         |  4 |
5 files changed, 7 insertions, 11 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 407c87d9879a..4692c90936f1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -55,7 +55,6 @@ config SPARC64
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index a564817bbc2e..812fd08f3e62 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -119,6 +119,8 @@ extern struct task_struct *last_task_used_math;
 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+
 extern void (*sparc_idle)(void);
 
 #endif
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 7028fe1a7c04..6924bdefe148 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -216,6 +216,7 @@ unsigned long get_wchan(struct task_struct *task);
 	"nop\n\t" \
 	".previous" \
 	::: "memory")
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Prefetch support. This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 3ad6cbdc2163..0b0ed4d34219 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -24,10 +24,7 @@ mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/* Do nothing, the retl/nop below is all we need. */
 #else
-	sethi	%hi(function_trace_stop), %g1
-	lduw	[%g1 + %lo(function_trace_stop)], %g2
-	brnz,pn	%g2, 2f
-	 sethi	%hi(ftrace_trace_function), %g1
+	sethi	%hi(ftrace_trace_function), %g1
 	sethi	%hi(ftrace_stub), %g2
 	ldx	[%g1 + %lo(ftrace_trace_function)], %g1
 	or	%g2, %lo(ftrace_stub), %g2
@@ -80,11 +77,8 @@ ftrace_stub:
 	.globl	ftrace_caller
 	.type	ftrace_caller,#function
 ftrace_caller:
-	sethi	%hi(function_trace_stop), %g1
 	mov	%i7, %g2
-	lduw	[%g1 + %lo(function_trace_stop)], %g1
-	brnz,pn	%g1, ftrace_stub
-	 mov	%fp, %g3
+	mov	%fp, %g3
 	save	%sp, -176, %sp
 	mov	%g2, %o1
 	mov	%g2, %l0
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 892a102671ad..1f76c22a6a75 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -354,7 +354,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \
  * emit_jump() calls with adjusted offsets.
  */
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	unsigned int cleanup_addr, proglen, oldproglen = 0;
 	u32 temp[8], *prog, *func, seen = 0, pass;
@@ -808,7 +808,7 @@ out:
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);
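
Note on the processor_*.h hunks: they only alias the new cpu_relax_lowlatency() macro to the existing cpu_relax(); generic code uses it in tight busy-wait loops where an architecture may prefer a cheaper relax primitive. A minimal sketch of such a loop follows; it is not taken from this commit, and wait_for_owner()/owner_is_running() are hypothetical names used purely for illustration.

/*
 * Sketch only (hypothetical kernel caller): spin until a lock owner
 * stops running, relaxing the CPU on each iteration.
 * owner_is_running() is an assumed helper; on sparc, after this
 * change, cpu_relax_lowlatency() simply expands to cpu_relax().
 */
static inline void wait_for_owner(struct task_struct *owner)
{
	while (owner_is_running(owner))
		cpu_relax_lowlatency();
}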