Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/Kbuild        |  1
-rw-r--r--  arch/sparc/include/asm/elf_64.h      |  2
-rw-r--r--  arch/sparc/include/asm/extable_64.h  | 20
-rw-r--r--  arch/sparc/include/asm/hypervisor.h  |  1
-rw-r--r--  arch/sparc/include/asm/irq_64.h      |  5
-rw-r--r--  arch/sparc/include/asm/string.h      | 34
-rw-r--r--  arch/sparc/include/asm/string_32.h   | 56
-rw-r--r--  arch/sparc/include/asm/string_64.h   | 44
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h  | 18
9 files changed, 61 insertions, 120 deletions
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 6024c26c0585..cfc918067f80 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += linkage.h
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 93310837c2df..3f2d403873bd 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -7,7 +7,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 
 /*
diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable_64.h
new file mode 100644
index 000000000000..1121cb056ffb
--- /dev/null
+++ b/arch/sparc/include/asm/extable_64.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_EXTABLE64_H
+#define __ASM_EXTABLE64_H
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned int insn, fixup;
+};
+
+#endif
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index f5b6537306f0..666d5ba230d2 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -1744,6 +1744,7 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
 
 #define HV_PCI_MAP_ATTR_READ		0x01
 #define HV_PCI_MAP_ATTR_WRITE		0x02
+#define HV_PCI_MAP_ATTR_RELAXED_ORDER	0x04
 
 #define HV_PCI_DEVICE_BUILD(b,d,f)	\
 	((((b) & 0xff) << 16)  | \
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3f70f900e834..1d51a11fb261 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -86,8 +86,9 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/include/asm/string.h b/arch/sparc/include/asm/string.h
index 98b72a0c8e6e..86f34be14ce0 100644
--- a/arch/sparc/include/asm/string.h
+++ b/arch/sparc/include/asm/string.h
@@ -5,4 +5,38 @@
 #else
 #include <asm/string_32.h>
 #endif
+
+/* First the mem*() things. */
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) __builtin_memset(s, c, count)
+
+#define __HAVE_ARCH_MEMSCAN
+
+#define memscan(__arg0, __char, __arg2) \
+({ \
+	void *__memscan_zero(void *, size_t); \
+	void *__memscan_generic(void *, int, size_t); \
+	void *__retval, *__addr = (__arg0); \
+	size_t __size = (__arg2); \
+	\
+	if(__builtin_constant_p(__char) && !(__char)) \
+		__retval = __memscan_zero(__addr, __size); \
+	else \
+		__retval = __memscan_generic(__addr, (__char), __size); \
+	\
+	__retval; \
+})
+
+#define __HAVE_ARCH_MEMCMP
+int memcmp(const void *,const void *,__kernel_size_t);
+
+#define __HAVE_ARCH_STRNCMP
+int strncmp(const char *, const char *, __kernel_size_t);
+
 #endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 69974e924611..649412476a69 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -11,60 +11,4 @@
 
 #include <asm/page.h>
 
-/* Really, userland/ksyms should not see any of this stuff. */
-
-#ifdef __KERNEL__
-
-void __memmove(void *,const void *,__kernel_size_t);
-
-#ifndef EXPORT_SYMTAB_STROPS
-
-/* First the mem*() things. */
-#define __HAVE_ARCH_MEMMOVE
-#undef memmove
-#define memmove(_to, _from, _n) \
-({ \
-	void *_t = (_to); \
-	__memmove(_t, (_from), (_n)); \
-	_t; \
-})
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) __builtin_memset(s, c, count)
-
-#define __HAVE_ARCH_MEMSCAN
-
-#undef memscan
-#define memscan(__arg0, __char, __arg2) \
-({ \
-	void *__memscan_zero(void *, size_t); \
-	void *__memscan_generic(void *, int, size_t); \
-	void *__retval, *__addr = (__arg0); \
-	size_t __size = (__arg2); \
-	\
-	if(__builtin_constant_p(__char) && !(__char)) \
-		__retval = __memscan_zero(__addr, __size); \
-	else \
-		__retval = __memscan_generic(__addr, (__char), __size); \
-	\
-	__retval; \
-})
-
-#define __HAVE_ARCH_MEMCMP
-int memcmp(const void *,const void *,__kernel_size_t);
-
-/* Now the str*() stuff... */
-#define __HAVE_ARCH_STRLEN
-__kernel_size_t strlen(const char *);
-
-#define __HAVE_ARCH_STRNCMP
-int strncmp(const char *, const char *, __kernel_size_t);
-
-#endif /* !EXPORT_SYMTAB_STROPS */
-
-#endif /* __KERNEL__ */
-
 #endif /* !(__SPARC_STRING_H__) */
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 5936b8ff3c05..6b9ccb308605 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -9,54 +9,10 @@
 #ifndef __SPARC64_STRING_H__
 #define __SPARC64_STRING_H__
 
-/* Really, userland/ksyms should not see any of this stuff. */
-
-#ifdef __KERNEL__
-
 #include <asm/asi.h>
 
-#ifndef EXPORT_SYMTAB_STROPS
-
-/* First the mem*() things. */
-#define __HAVE_ARCH_MEMMOVE
-void *memmove(void *, const void *, __kernel_size_t);
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) __builtin_memset(s, c, count)
-
-#define __HAVE_ARCH_MEMSCAN
-
-#undef memscan
-#define memscan(__arg0, __char, __arg2) \
-({ \
-	void *__memscan_zero(void *, size_t); \
-	void *__memscan_generic(void *, int, size_t); \
-	void *__retval, *__addr = (__arg0); \
-	size_t __size = (__arg2); \
-	\
-	if(__builtin_constant_p(__char) && !(__char)) \
-		__retval = __memscan_zero(__addr, __size); \
-	else \
-		__retval = __memscan_generic(__addr, (__char), __size); \
-	\
-	__retval; \
-})
-
-#define __HAVE_ARCH_MEMCMP
-int memcmp(const void *,const void *,__kernel_size_t);
-
 /* Now the str*() stuff... */
 #define __HAVE_ARCH_STRLEN
 __kernel_size_t strlen(const char *);
 
-#define __HAVE_ARCH_STRNCMP
-int strncmp(const char *, const char *, __kernel_size_t);
-
-#endif /* !EXPORT_SYMTAB_STROPS */
-
-#endif /* __KERNEL__ */
-
 #endif /* !(__SPARC64_STRING_H__) */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 37a315d0ddd4..b68acc563235 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -13,6 +13,7 @@
 #include <asm/asi.h>
 #include <asm/spitfire.h>
 #include <asm-generic/uaccess-unaligned.h>
+#include <asm/extable_64.h>
 #endif
 
 #ifndef __ASSEMBLY__
@@ -81,23 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
 	return 1;
 }
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned int insn, fixup;
-};
-
 void __ret_efault(void);
 void __retl_efault(void);
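Note: the comment block moved into extable_64.h above describes the (insn, fixup) exception-table mechanism. As a rough illustration of how such a table is consumed, the sketch below shows a fault handler looking up a fixup address for a faulting program counter. This is only a sketch, not the kernel's actual search_extable()/fixup path: it assumes the 32-bit insn/fixup fields hold addresses directly comparable to the faulting PC and uses a linear scan for clarity, whereas the kernel keeps the table sorted and binary-searches it.

/*
 * Illustrative sketch only -- not kernel code.
 * Given a table of (insn, fixup) pairs like the one declared in
 * extable_64.h, return the entry whose insn address matches the
 * faulting program counter, or NULL if the fault has no fixup.
 */
#include <stddef.h>

struct exception_table_entry {
	unsigned int insn, fixup;
};

static const struct exception_table_entry *
find_fixup(const struct exception_table_entry *start,
	   const struct exception_table_entry *end,
	   unsigned long fault_pc)
{
	const struct exception_table_entry *e;

	for (e = start; e < end; e++) {
		/* Faulting instruction is whitelisted: resume at e->fixup. */
		if ((unsigned long)e->insn == fault_pc)
			return e;
	}
	return NULL;	/* No fixup registered: the fault is fatal. */
}

A trap handler that finds a match would rewrite the saved program counter to the entry's fixup address and resume there, letting the out-of-line fixup code (for example the __ret_efault()/__retl_efault() helpers still declared in uaccess_64.h) decide how to report the fault.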