Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/elf_64.h     |  2
-rw-r--r--  arch/sparc/include/asm/extable_64.h | 20
-rw-r--r--  arch/sparc/include/asm/ftrace.h     |  4
-rw-r--r--  arch/sparc/include/asm/hypervisor.h |  1
-rw-r--r--  arch/sparc/include/asm/irq_64.h     |  5
-rw-r--r--  arch/sparc/include/asm/page_64.h    |  1
-rw-r--r--  arch/sparc/include/asm/smp_64.h     |  2
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h | 13
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h | 28
9 files changed, 50 insertions, 26 deletions
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 93310837c2df..3f2d403873bd 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -7,7 +7,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 
 /*
diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable_64.h
new file mode 100644
index 000000000000..1121cb056ffb
--- /dev/null
+++ b/arch/sparc/include/asm/extable_64.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_EXTABLE64_H
+#define __ASM_EXTABLE64_H
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+        unsigned int insn, fixup;
+};
+
+#endif
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index 3192a8e42fd6..62755a339a59 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -9,6 +9,10 @@
 void _mcount(void);
 #endif
 
+#endif /* CONFIG_MCOUNT */
+
+#if defined(CONFIG_SPARC64) && !defined(CC_USE_FENTRY)
+#define HAVE_FUNCTION_GRAPH_FP_TEST
 #endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index f5b6537306f0..666d5ba230d2 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -1744,6 +1744,7 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
 
 #define HV_PCI_MAP_ATTR_READ            0x01
 #define HV_PCI_MAP_ATTR_WRITE           0x02
+#define HV_PCI_MAP_ATTR_RELAXED_ORDER   0x04
 
 #define HV_PCI_DEVICE_BUILD(b,d,f)      \
 ((((b) & 0xff) << 16) | \
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3f70f900e834..1d51a11fb261 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -86,8 +86,9 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 8c2a8c937540..c1263fc390db 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif
 
 #ifndef __ASSEMBLY__
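The comment block in the new extable_64.h describes the whole mechanism: on a fault, the trap handler looks the faulting PC up in a table of (insn, fixup) address pairs and, on a hit, resumes execution at the fixup. Below is a minimal userspace model of that lookup; find_fixup() and the addresses are invented for illustration, while in the kernel the table is sorted at link time and searched by search_extable().

    #include <stdio.h>

    /* Same layout as the struct added in extable_64.h: addresses of the
     * faulting instruction and of its fixup code. */
    struct exception_table_entry {
            unsigned int insn, fixup;
    };

    /* Binary search over a table sorted by insn address, as the linker
     * lays out __ex_table; find_fixup() is an invented stand-in for the
     * kernel's search_extable(). */
    static const struct exception_table_entry *
    find_fixup(const struct exception_table_entry *tbl, int n, unsigned int pc)
    {
            int lo = 0, hi = n - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (tbl[mid].insn == pc)
                            return &tbl[mid];
                    if (tbl[mid].insn < pc)
                            lo = mid + 1;
                    else
                            hi = mid - 1;
            }
            return NULL;    /* no entry: the fault is a genuine bug */
    }

    int main(void)
    {
            /* Hypothetical table with two instructions allowed to fault. */
            static const struct exception_table_entry table[] = {
                    { 0x1000, 0x2000 },
                    { 0x1008, 0x2010 },
            };
            const struct exception_table_entry *e = find_fixup(table, 2, 0x1008);

            if (e)  /* the trap handler would set the PC to e->fixup and resume */
                    printf("fault at 0x1008 resumes at 0x%x\n", e->fixup);
            return 0;
    }

Keeping the entries out of line, as the comment notes, means the fast path pays nothing for them: no branch over fixup code, no extra cache or TLB pressure.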
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 26d9e7726867..ce2233f7e662 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -43,6 +43,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
+void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void cpu_play_dead(void);
 
@@ -72,6 +73,7 @@ void __cpu_die(unsigned int cpu);
 
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
+#define smp_fill_in_cpu_possible_map() do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 57aca2792d29..ea55f86d7ccd 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -248,23 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) to, n))
+	if (n && __access_ok((unsigned long) to, n)) {
+		check_object_size(from, n, true);
 		return __copy_user(to, (__force void __user *) from, n);
-	else
+	} else
 		return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) from, n))
+	if (n && __access_ok((unsigned long) from, n)) {
+		check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
-	else
+	} else {
+		memset(to, 0, n);
 		return n;
+	}
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
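Two hardening changes land in uaccess_32.h: the copy routines now call check_object_size() on the kernel-side buffer, and a rejected copy_from_user() zero-fills the destination instead of leaving it untouched, so a caller that ignores the return value cannot go on to read stale kernel memory. A minimal sketch of the zero-fill pattern, with raw_copy() and the access flag as invented stand-ins for __copy_user() and __access_ok():

    #include <string.h>

    /* raw_copy() is an invented stand-in for __copy_user(): it returns
     * the number of bytes NOT copied, 0 on success. */
    static unsigned long raw_copy(void *to, const void *from, unsigned long n)
    {
            memcpy(to, from, n);
            return 0;
    }

    /* Models the fixed copy_from_user(): when the user range is rejected,
     * zero-fill the kernel buffer before returning, so callers that skip
     * the return-value check read zeroes rather than stale kernel stack
     * or heap contents. */
    static unsigned long copy_from_user_model(void *to, const void *from,
                                              unsigned long n, int access_ok)
    {
            if (n && access_ok)
                    return raw_copy(to, from, n);
            memset(to, 0, n);       /* the hardening this hunk adds */
            return n;               /* nothing was copied */
    }

    int main(void)
    {
            char buf[16];
            /* Simulate a rejected user pointer: buf must come back zeroed. */
            unsigned long left = copy_from_user_model(buf, "secret", sizeof(buf), 0);

            return (left == sizeof(buf) && buf[0] == 0) ? 0 : 1;
    }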
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e9a51d64974d..b68acc563235 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -13,6 +13,7 @@
 #include <asm/asi.h>
 #include <asm/spitfire.h>
 #include <asm-generic/uaccess-unaligned.h>
+#include <asm/extable_64.h>
 #endif
 
 #ifndef __ASSEMBLY__
@@ -81,23 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
 	return 1;
 }
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-        unsigned int insn, fixup;
-};
-
 void __ret_efault(void);
 void __retl_efault(void);
 
@@ -210,8 +194,11 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
+	unsigned long ret;
+
+	check_object_size(to, size, false);
+	ret = ___copy_from_user(to, from, size);
 
 	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 
@@ -227,8 +214,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-	unsigned long ret = ___copy_to_user(to, from, size);
+	unsigned long ret;
+
+	check_object_size(from, size, true);
+	ret = ___copy_to_user(to, from, size);
 
 	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 
 	return ret;
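check_object_size(), called by both uaccess headers before the bulk copy, is the hook for CONFIG_HARDENED_USERCOPY: it verifies at run time that the kernel buffer lies entirely within one known object (a slab allocation, the current stack frame, and so on), with the boolean argument recording whether data flows to user space. A toy model of the bounds check follows; the object bounds are invented, where the kernel derives them from slab metadata and stack limits.

    #include <stdio.h>

    /* Toy model of the check behind check_object_size(): reject a copy
     * whose span escapes the containing object. */
    static int usercopy_ok(unsigned long ptr, unsigned long n,
                           unsigned long obj_start, unsigned long obj_end)
    {
            return ptr >= obj_start && n <= obj_end - ptr;
    }

    int main(void)
    {
            /* A 64-byte object at 0x1000: copying 128 bytes out of it would
             * expose 64 adjacent bytes to user space, so it must fail. */
            printf("128-byte copy: %s\n",
                   usercopy_ok(0x1000, 128, 0x1000, 0x1040) ? "ok" : "rejected");
            printf(" 32-byte copy: %s\n",
                   usercopy_ok(0x1000,  32, 0x1000, 0x1040) ? "ok" : "rejected");
            return 0;
    }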