Diffstat (limited to 'arch/parisc'): 50 files changed, 1497 insertions, 555 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 43c1c880def6..864b4756dcdf 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -10,10 +10,12 @@ config PARISC select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_STACKWALK + select ARCH_HAS_DEBUG_VM_PGTABLE select HAVE_RELIABLE_STACKTRACE select DMA_OPS select RTC_CLASS @@ -259,18 +261,6 @@ config PARISC_PAGE_SIZE_64KB endchoice -config PARISC_SELF_EXTRACT - bool "Build kernel as self-extracting executable" - default y - help - Say Y if you want to build the parisc kernel as a kind of - self-extracting executable. - - If you say N here, the kernel will be compressed with gzip - which can be loaded by the palo bootloader directly too. - - If you don't know what to do here, say Y. - config SMP bool "Symmetric multi-processing support" help diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 82d77f4b0d08..2a9387a93592 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -15,12 +15,8 @@ # Mike Shaver, Helge Deller and Martin K. Petersen # -ifdef CONFIG_PARISC_SELF_EXTRACT boot := arch/parisc/boot KBUILD_IMAGE := $(boot)/bzImage -else -KBUILD_IMAGE := vmlinuz -endif NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 @@ -44,6 +40,16 @@ endif export LD_BFD +# Set default 32 bits cross compilers for vdso +CC_ARCHES_32 = hppa hppa2.0 hppa1.1 +CC_SUFFIXES = linux linux-gnu unknown-linux-gnu +CROSS32_COMPILE := $(call cc-cross-prefix, \ + $(foreach a,$(CC_ARCHES_32), \ + $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) +CROSS32CC := $(CROSS32_COMPILE)gcc +export CROSS32CC + +# Set default cross compiler for kernel build ifdef cross_compiling ifeq ($(CROSS_COMPILE),) CC_SUFFIXES = linux linux-gnu unknown-linux-gnu @@ -155,14 +161,29 @@ Image: vmlinux bzImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -ifdef CONFIG_PARISC_SELF_EXTRACT vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ -else -vmlinuz: vmlinux - @$(KGZIP) -cf -9 $< > $@ + +ifeq ($(KBUILD_EXTMOD),) +# We need to generate vdso-offsets.h before compiling certain files in kernel/. +# In order to do that, we should use the archprepare target, but we can't since +# asm-offsets.h is included in some files used to generate vdso-offsets.h, and +# asm-offsets.h is built in prepare0, for which archprepare is a dependency. +# Therefore we need to generate the header after prepare0 has been made, hence +# this hack. 
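The header this ordering exists to produce is consumed by the VDSO32_SYMBOL()/VDSO64_SYMBOL() macros added in asm/vdso.h further down; the prepare/vdso_prepare rules that follow encode the dependency. A sketch of what the generated file looks like, with hypothetical offset values (only the vdso32_offset_<name> naming is fixed by those macros):

/* include/generated/vdso32-offsets.h -- sketch, offsets are example values */
#define vdso32_offset_sigtramp_rt	0x00000000
#define vdso32_offset_restart_syscall	0x00000020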
+prepare: vdso_prepare +vdso_prepare: prepare0 + $(if $(CONFIG_64BIT),$(Q)$(MAKE) \ + $(build)=arch/parisc/kernel/vdso64 include/generated/vdso64-offsets.h) + $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h endif +PHONY += vdso_install + +vdso_install: + $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@ + $(if $(CONFIG_COMPAT_VDSO), \ + $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@) install: $(CONFIG_SHELL) $(srctree)/arch/parisc/install.sh \ $(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)" diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 53061cb2cf7f..a5fee10d76ee 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -210,7 +210,6 @@ CONFIG_TMPFS_XATTR=y CONFIG_NFS_FS=m # CONFIG_NFS_V2 is not set CONFIG_NFSD=m -CONFIG_NFSD_V3=y CONFIG_CIFS=m CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index 6369082c6c74..ea0cb318b13d 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -47,6 +47,12 @@ #define PRIV_USER 3 #define PRIV_KERNEL 0 +/* Space register used inside kernel */ +#define SR_KERNEL 0 +#define SR_TEMP1 1 +#define SR_TEMP2 2 +#define SR_USER 3 + #ifdef __ASSEMBLY__ #ifdef CONFIG_64BIT diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index d53e9e27dba0..5032e758594e 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -39,16 +39,13 @@ extern int icache_stride; extern struct pdc_cache_info cache_info; void parisc_setup_cache_timing(void); -#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \ +#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ - : : "r" (addr) : "memory") -#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \ + : : "i"(sr), "r" (addr) : "memory") +#define pitlb(sr, addr) asm volatile("pitlb 0(%%sr%0,%1)" \ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \ - : : "r" (addr) : "memory") -#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \ - ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ - : : "r" (addr) : "memory") + : : "i"(sr), "r" (addr) : "memory") #define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 859b8a34adcf..e8b4a03343d3 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -9,16 +9,11 @@ /* The usual comment is "Caches aren't brain-dead on the <architecture>". * Unfortunately, that doesn't apply to PA-RISC. 
*/ -/* Internal implementation */ -void flush_data_cache_local(void *); /* flushes local data-cache only */ -void flush_instruction_cache_local(void *); /* flushes local code-cache only */ -#ifdef CONFIG_SMP -void flush_data_cache(void); /* flushes data-cache only (all processors) */ -void flush_instruction_cache(void); /* flushes i-cache only (all processors) */ -#else -#define flush_data_cache() flush_data_cache_local(NULL) -#define flush_instruction_cache() flush_instruction_cache_local(NULL) -#endif +#include <linux/jump_label.h> + +DECLARE_STATIC_KEY_TRUE(parisc_has_cache); +DECLARE_STATIC_KEY_TRUE(parisc_has_dcache); +DECLARE_STATIC_KEY_TRUE(parisc_has_icache); #define flush_cache_dup_mm(mm) flush_cache_mm(mm) diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h index 568b739e42af..dc7aea07c3f3 100644 --- a/arch/parisc/include/asm/current.h +++ b/arch/parisc/include/asm/current.h @@ -2,14 +2,16 @@ #ifndef _ASM_PARISC_CURRENT_H #define _ASM_PARISC_CURRENT_H -#include <asm/special_insns.h> - #ifndef __ASSEMBLY__ struct task_struct; static __always_inline struct task_struct *get_current(void) { - return (struct task_struct *) mfctl(30); + struct task_struct *ts; + + /* do not use mfctl() macro as it is marked volatile */ + asm( "mfctl %%cr30,%0" : "=r" (ts) ); + return ts; } #define current get_current() diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 3bd465a27791..cc426d365892 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -359,4 +359,19 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *); #define arch_randomize_brk arch_randomize_brk + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack); +#define VDSO_AUX_ENT(a, b) NEW_AUX_ENT(a, b) +#define VDSO_CURRENT_BASE current->mm->context.vdso_base + +#define ARCH_DLINFO \ +do { \ + if (VDSO_CURRENT_BASE) { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);\ + } \ +} while (0) + #endif diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h index 904034da4974..0a175ac87698 100644 --- a/arch/parisc/include/asm/kprobes.h +++ b/arch/parisc/include/asm/kprobes.h @@ -18,8 +18,9 @@ #include <linux/notifier.h> #define PARISC_KPROBES_BREAK_INSN 0x3ff801f +#define PARISC_KPROBES_BREAK_INSN2 0x3ff801e #define __ARCH_WANT_KPROBES_INSN_SLOT -#define MAX_INSN_SIZE 1 +#define MAX_INSN_SIZE 2 typedef u32 kprobe_opcode_t; struct kprobe; @@ -29,7 +30,7 @@ void arch_remove_kprobe(struct kprobe *p); #define flush_insn_slot(p) \ flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \ (unsigned long)&(p)->ainsn.insn[0] + \ - sizeof(kprobe_opcode_t)) + MAX_INSN_SIZE*sizeof(kprobe_opcode_t)) #define kretprobe_blacklist_size 0 diff --git a/arch/parisc/include/asm/mmu.h b/arch/parisc/include/asm/mmu.h index 3fb70a601d5c..44fd062b62ed 100644 --- a/arch/parisc/include/asm/mmu.h +++ b/arch/parisc/include/asm/mmu.h @@ -2,7 +2,9 @@ #ifndef _PARISC_MMU_H_ #define _PARISC_MMU_H_ -/* On parisc, we store the space id here */ -typedef unsigned long mm_context_t; +typedef struct { + unsigned long space_id; + unsigned long vdso_base; +} mm_context_t; #endif /* _PARISC_MMU_H_ */ diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 726257648d9f..c9187fe836a3 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -20,7 +20,7 @@ 
init_new_context(struct task_struct *tsk, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_users) != 1); - mm->context = alloc_sid(); + mm->context.space_id = alloc_sid(); return 0; } @@ -28,22 +28,22 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) static inline void destroy_context(struct mm_struct *mm) { - free_sid(mm->context); - mm->context = 0; + free_sid(mm->context.space_id); + mm->context.space_id = 0; } static inline unsigned long __space_to_prot(mm_context_t context) { #if SPACEID_SHIFT == 0 - return context << 1; + return context.space_id << 1; #else - return context >> (SPACEID_SHIFT - 1); + return context.space_id >> (SPACEID_SHIFT - 1); #endif } static inline void load_context(mm_context_t context) { - mtsp(context, 3); + mtsp(context.space_id, SR_USER); mtctl(__space_to_prot(context), 8); } @@ -89,8 +89,8 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) BUG_ON(next == &init_mm); /* Should never happen */ - if (next->context == 0) - next->context = alloc_sid(); + if (next->context.space_id == 0) + next->context.space_id = alloc_sid(); switch_mm(prev,next,current); } diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 48d49f207f84..939db6fe620b 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -70,9 +70,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) unsigned long flags; purge_tlb_start(flags); - mtsp(mm->context, 1); - pdtlb(addr); - pitlb(addr); + mtsp(mm->context.space_id, SR_TEMP1); + pdtlb(SR_TEMP1, addr); + pitlb(SR_TEMP1, addr); purge_tlb_end(flags); } @@ -219,9 +219,10 @@ extern void __update_cache(pte_t pte); #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) +#define _PAGE_SPECIAL (_PAGE_DMB) #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL) #define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC) #define _PAGE_KERNEL_RWX (_PAGE_KERNEL_EXEC | _PAGE_WRITE) @@ -348,6 +349,7 @@ static inline void pud_clear(pud_t *pud) { static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } @@ -355,6 +357,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; ret static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } /* * Huge pte definitions. 
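With ARCH_HAS_PTE_SPECIAL selected above, parisc reuses the DMB bit as _PAGE_SPECIAL so that generic mm code can mark raw-PFN mappings which have no struct page behind them. A minimal sketch of how a caller distinguishes the two cases, assuming kernel context (the helper name is illustrative, not from this patch):

/* Sketch: a "special" PTE (e.g. a VM_PFNMAP insertion marked with
 * pte_mkspecial()) must not be treated as a normal page by the mm. */
static inline bool example_pte_has_normal_page(pte_t pte)
{
	return pte_present(pte) && !pte_special(pte);
}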
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 3a3d05438408..006364212795 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -236,7 +236,7 @@ on downward growing arches, it looks like this: #define start_thread(regs, new_pc, new_sp) do { \ elf_addr_t *sp = (elf_addr_t *)new_sp; \ - __u32 spaceid = (__u32)current->mm->context; \ + __u32 spaceid = (__u32)current->mm->context.space_id; \ elf_addr_t pc = (elf_addr_t)new_pc | 3; \ elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \ \ diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h index 2b3010ade00e..bb7fb4153327 100644 --- a/arch/parisc/include/asm/rt_sigframe.h +++ b/arch/parisc/include/asm/rt_sigframe.h @@ -2,16 +2,8 @@ #ifndef _ASM_PARISC_RT_SIGFRAME_H #define _ASM_PARISC_RT_SIGFRAME_H -#define SIGRETURN_TRAMP 4 -#define SIGRESTARTBLOCK_TRAMP 5 -#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP) - struct rt_sigframe { - /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c - Secondary to that it must protect the ERESTART_RESTARTBLOCK - trampoline we left on the stack (we were bad and didn't - change sp so we could run really fast.) */ - unsigned int tramp[TRAMP_SIZE]; + unsigned int tramp[2]; /* holds original return address */ struct siginfo info; struct ucontext uc; }; diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index 16ee41e77174..41b3ddbd344c 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -55,8 +55,8 @@ static inline void set_eiem(unsigned long val) #define mfsp(reg) ({ \ unsigned long cr; \ __asm__ __volatile__( \ - "mfsp " #reg ",%0" : \ - "=r" (cr) \ + "mfsp %%sr%1,%0" \ + : "=r" (cr) : "i"(reg) \ ); \ cr; \ }) diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h index c5ded01d45be..5ffd7c17f593 100644 --- a/arch/parisc/include/asm/tlbflush.h +++ b/arch/parisc/include/asm/tlbflush.h @@ -17,7 +17,7 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end); #define flush_tlb_range(vma, start, end) \ - __flush_tlb_range((vma)->vm_mm->context, start, end) + __flush_tlb_range((vma)->vm_mm->context.space_id, start, end) #define flush_tlb_kernel_range(start, end) \ __flush_tlb_range(0, start, end) diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 34619f010c63..0ccdb738a9a3 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -18,6 +18,7 @@ unsigned long parisc_acctyp(unsigned long code, unsigned int inst); const char *trap_name(unsigned long code); void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address); +int handle_nadtlb_fault(struct pt_regs *regs); #endif #endif diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 123d5f16cd9d..b3eb4541e441 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -79,18 +79,18 @@ struct exception_table_entry { #define __get_user(val, ptr) \ ({ \ - __get_user_internal("%%sr3,", val, ptr); \ + __get_user_internal(SR_USER, val, ptr); \ }) #define __get_user_asm(sr, val, ldx, ptr) \ { \ register long __gu_val; \ \ - __asm__("1: " ldx " 0(" sr "%2),%0\n" \ + __asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \ "9:\n" \ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ : "=r"(__gu_val), "+r"(__gu_err) \ - : "r"(ptr)); \ + : "i"(sr), "r"(ptr)); \ \ (val) = 
(__force __typeof__(*(ptr))) __gu_val; \ } @@ -100,7 +100,7 @@ struct exception_table_entry { { \ type __z; \ long __err; \ - __err = __get_user_internal("%%sr0,", __z, (type *)(src)); \ + __err = __get_user_internal(SR_KERNEL, __z, (type *)(src)); \ if (unlikely(__err)) \ goto err_label; \ else \ @@ -118,13 +118,13 @@ struct exception_table_entry { } __gu_tmp; \ \ __asm__(" copy %%r0,%R0\n" \ - "1: ldw 0(" sr "%2),%0\n" \ - "2: ldw 4(" sr "%2),%R0\n" \ + "1: ldw 0(%%sr%2,%3),%0\n" \ + "2: ldw 4(%%sr%2,%3),%R0\n" \ "9:\n" \ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ : "=&r"(__gu_tmp.l), "+r"(__gu_err) \ - : "r"(ptr)); \ + : "i"(sr), "r"(ptr)); \ \ (val) = __gu_tmp.t; \ } @@ -151,14 +151,14 @@ struct exception_table_entry { ({ \ __typeof__(&*(ptr)) __ptr = ptr; \ __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \ - __put_user_internal("%%sr3,", __x, __ptr); \ + __put_user_internal(SR_USER, __x, __ptr); \ }) #define __put_kernel_nofault(dst, src, type, err_label) \ { \ type __z = *(type *)(src); \ long __err; \ - __err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \ + __err = __put_user_internal(SR_KERNEL, __z, (type *)(dst)); \ if (unlikely(__err)) \ goto err_label; \ } @@ -178,24 +178,24 @@ struct exception_table_entry { #define __put_user_asm(sr, stx, x, ptr) \ __asm__ __volatile__ ( \ - "1: " stx " %2,0(" sr "%1)\n" \ + "1: " stx " %1,0(%%sr%2,%3)\n" \ "9:\n" \ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ : "+r"(__pu_err) \ - : "r"(ptr), "r"(x)) + : "r"(x), "i"(sr), "r"(ptr)) #if !defined(CONFIG_64BIT) #define __put_user_asm64(sr, __val, ptr) do { \ __asm__ __volatile__ ( \ - "1: stw %2,0(" sr "%1)\n" \ - "2: stw %R2,4(" sr "%1)\n" \ + "1: stw %1,0(%%sr%2,%3)\n" \ + "2: stw %R1,4(%%sr%2,%3)\n" \ "9:\n" \ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ : "+r"(__pu_err) \ - : "r"(ptr), "r"(__val)); \ + : "r"(__val), "i"(sr), "r"(ptr)); \ } while (0) #endif /* !defined(CONFIG_64BIT) */ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index cd438e4150f6..7708a5806f09 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -63,10 +63,6 @@ ); \ __sys_res = (long)__res; \ } \ - if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){ \ - errno = -__sys_res; \ - __sys_res = -1; \ - } \ __sys_res; \ }) diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h new file mode 100644 index 000000000000..ef8206193f82 --- /dev/null +++ b/arch/parisc/include/asm/vdso.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PARISC_VDSO_H__ +#define __PARISC_VDSO_H__ + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_64BIT +#include <generated/vdso64-offsets.h> +#endif +#include <generated/vdso32-offsets.h> + +#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) +#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) + +extern struct vdso_data *vdso_data; + +#endif /* __ASSEMBLY __ */ + +/* Default link addresses for the vDSOs */ +#define VDSO_LBASE 0 + +#define VDSO_VERSION_STRING LINUX_5.18 + +#endif /* __PARISC_VDSO_H__ */ diff --git a/arch/parisc/include/uapi/asm/auxvec.h b/arch/parisc/include/uapi/asm/auxvec.h new file mode 100644 index 000000000000..90d2aa699cf3 --- /dev/null +++ b/arch/parisc/include/uapi/asm/auxvec.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PARISC_AUXVEC_H +#define _UAPI_PARISC_AUXVEC_H + 
+/* The vDSO location. */ +#define AT_SYSINFO_EHDR 33 + +#endif /* _UAPI_PARISC_AUXVEC_H */ diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 8fb819bbbb17..d579243edc2f 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -39,3 +39,8 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o obj-$(CONFIG_KEXEC_FILE) += kexec_file.o + +# vdso +obj-y += vdso.o +obj-$(CONFIG_64BIT) += vdso64/ +obj-y += vdso32/ diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c index fa28c4c9f972..daa1e9047275 100644 --- a/arch/parisc/kernel/alternative.c +++ b/arch/parisc/kernel/alternative.c @@ -8,6 +8,7 @@ #include <asm/processor.h> #include <asm/sections.h> #include <asm/alternative.h> +#include <asm/cacheflush.h> #include <linux/module.h> @@ -107,5 +108,14 @@ void __init apply_alternatives_all(void) apply_alternatives((struct alt_instr *) &__alt_instructions, (struct alt_instr *) &__alt_instructions_end, NULL); + if (cache_info.dc_size == 0 && cache_info.ic_size == 0) { + pr_info("alternatives: optimizing cache-flushes.\n"); + static_branch_disable(&parisc_has_cache); + } + if (cache_info.dc_size == 0) + static_branch_disable(&parisc_has_dcache); + if (cache_info.ic_size == 0) + static_branch_disable(&parisc_has_icache); + set_kernel_text_rw(0); } diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 2a83ef36d216..2673d57eeb00 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -26,7 +26,11 @@ #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/pdc.h> +#include <uapi/asm/sigcontext.h> +#include <asm/ucontext.h> +#include <asm/rt_sigframe.h> #include <linux/uaccess.h> +#include "signal32.h" /* Add FRAME_SIZE to the size x and align it to y. All definitions * that use align_frame will include space for a frame. 
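The new aux vector entry is how userspace finds the vDSO mapping that arch_setup_additional_pages() creates. A self-contained probe using glibc's standard getauxval() (the program is an illustration, not part of this patch):

/* Print the vDSO base the kernel now publishes via AT_SYSINFO_EHDR (33). */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (vdso_base)
		printf("vDSO ELF header mapped at %#lx\n", vdso_base);
	else
		puts("no vDSO mapped (kernel without this support)");
	return 0;
}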
@@ -218,6 +222,11 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PRE_COUNT, offsetof(struct task_struct, thread_info.preempt_count)); BLANK(); + DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE); + DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE); + DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32); + DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32); + BLANK(); DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count)); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 94150b91c96f..456e879d34a8 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -38,6 +38,9 @@ EXPORT_SYMBOL(flush_dcache_page_asm); void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); +/* Internal implementation in arch/parisc/kernel/pacache.S */ +void flush_data_cache_local(void *); /* flushes local data-cache only */ +void flush_instruction_cache_local(void); /* flushes local code-cache only */ /* On some machines (i.e., ones with the Merced bus), there can be * only a single PxTLB broadcast at a time; this must be guaranteed @@ -58,26 +61,35 @@ struct pdc_cache_info cache_info __ro_after_init; static struct pdc_btlb_info btlb_info __ro_after_init; #endif -#ifdef CONFIG_SMP -void -flush_data_cache(void) +DEFINE_STATIC_KEY_TRUE(parisc_has_cache); +DEFINE_STATIC_KEY_TRUE(parisc_has_dcache); +DEFINE_STATIC_KEY_TRUE(parisc_has_icache); + +static void cache_flush_local_cpu(void *dummy) { - on_each_cpu(flush_data_cache_local, NULL, 1); + if (static_branch_likely(&parisc_has_icache)) + flush_instruction_cache_local(); + if (static_branch_likely(&parisc_has_dcache)) + flush_data_cache_local(NULL); } -void -flush_instruction_cache(void) + +void flush_cache_all_local(void) { - on_each_cpu(flush_instruction_cache_local, NULL, 1); + cache_flush_local_cpu(NULL); } -#endif -void -flush_cache_all_local(void) +void flush_cache_all(void) { - flush_instruction_cache_local(NULL); - flush_data_cache_local(NULL); + if (static_branch_likely(&parisc_has_cache)) + on_each_cpu(cache_flush_local_cpu, NULL, 1); } -EXPORT_SYMBOL(flush_cache_all_local); + +static inline void flush_data_cache(void) +{ + if (static_branch_likely(&parisc_has_dcache)) + on_each_cpu(flush_data_cache_local, NULL, 1); +} + /* Virtual address of pfn. 
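These three static keys replace the old SMP/UP function pairs; apply_alternatives_all() (earlier in this diff) disables them once at boot when PDC reports a zero-sized cache, so the checks cost a patched nop afterwards. A minimal sketch of the jump-label pattern, with illustrative names:

#include <linux/jump_label.h>

/* Defaults to true; flipped at most once during boot, never at runtime. */
DEFINE_STATIC_KEY_TRUE(example_has_cache);

static void example_flush_path(void)
{
	if (static_branch_likely(&example_has_cache)) {
		/* expensive flush runs only on machines with a cache */
	}
}

static void example_boot_fixup(unsigned long cache_size)
{
	if (cache_size == 0)
		static_branch_disable(&example_has_cache);
}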
*/ #define pfn_va(pfn) __va(PFN_PHYS(pfn)) @@ -303,6 +315,8 @@ static inline void __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long physaddr) { + if (!static_branch_likely(&parisc_has_cache)) + return; preempt_disable(); flush_dcache_page_asm(physaddr, vmaddr); if (vma->vm_flags & VM_EXEC) @@ -314,6 +328,8 @@ static inline void __purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long physaddr) { + if (!static_branch_likely(&parisc_has_cache)) + return; preempt_disable(); purge_dcache_page_asm(physaddr, vmaddr); if (vma->vm_flags & VM_EXEC) @@ -375,7 +391,6 @@ EXPORT_SYMBOL(flush_dcache_page); /* Defined in arch/parisc/kernel/pacache.S */ EXPORT_SYMBOL(flush_kernel_dcache_range_asm); -EXPORT_SYMBOL(flush_data_cache_local); EXPORT_SYMBOL(flush_kernel_icache_range_asm); #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ @@ -388,7 +403,7 @@ void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; unsigned long size; - unsigned long threshold; + unsigned long threshold, threshold2; alltime = mfctl(16); flush_data_cache(); @@ -403,8 +418,20 @@ void __init parisc_setup_cache_timing(void) alltime, size, rangetime); threshold = L1_CACHE_ALIGN(size * alltime / rangetime); - if (threshold > cache_info.dc_size) - threshold = cache_info.dc_size; + + /* + * The threshold computed above isn't very reliable since the + * flush times depend greatly on the percentage of dirty lines + * in the flush range. Further, the whole cache time doesn't + * include the time to refill lines that aren't in the mm/vma + * being flushed. By timing glibc build and checks on mako cpus, + * the following formula seems to work reasonably well. The + * value from the timing calculation is too small, and increases + * build and check times by almost a factor two. + */ + threshold2 = cache_info.dc_size * num_online_cpus(); + if (threshold2 > threshold) + threshold = threshold2; if (threshold) parisc_cache_flush_threshold = threshold; printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", @@ -457,7 +484,7 @@ void flush_kernel_dcache_page_addr(void *addr) flush_kernel_dcache_page_asm(addr); purge_tlb_start(flags); - pdtlb_kernel(addr); + pdtlb(SR_KERNEL, addr); purge_tlb_end(flags); } EXPORT_SYMBOL(flush_kernel_dcache_page_addr); @@ -496,25 +523,15 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, but cause a purge request to be broadcast to other TLBs. */ while (start < end) { purge_tlb_start(flags); - mtsp(sid, 1); - pdtlb(start); - pitlb(start); + mtsp(sid, SR_TEMP1); + pdtlb(SR_TEMP1, start); + pitlb(SR_TEMP1, start); purge_tlb_end(flags); start += PAGE_SIZE; } return 0; } -static void cacheflush_h_tmp_function(void *dummy) -{ - flush_cache_all_local(); -} - -void flush_cache_all(void) -{ - on_each_cpu(cacheflush_h_tmp_function, NULL, 1); -} - static inline unsigned long mm_total_size(struct mm_struct *mm) { struct vm_area_struct *vma; @@ -558,15 +575,6 @@ static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm, } } -static void flush_user_cache_tlb(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); - flush_tlb_range(vma, start, end); -} - void flush_cache_mm(struct mm_struct *mm) { struct vm_area_struct *vma; @@ -575,23 +583,14 @@ void flush_cache_mm(struct mm_struct *mm) rp3440, etc. So, avoid it if the mm isn't too big. 
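To make the new flush-threshold heuristic concrete: with a hypothetical 1 MiB D-cache and 4 CPUs online, threshold2 = 1 MiB * 4 = 4 MiB, so ranges below 4 MiB are flushed page by page and only larger ones pay for a whole-cache flush. The old code capped the threshold at dc_size alone (1 MiB here), switching to whole-cache flushes four times sooner.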
*/ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && mm_total_size(mm) >= parisc_cache_flush_threshold) { - if (mm->context) + if (mm->context.space_id) flush_tlb_all(); flush_cache_all(); return; } - preempt_disable(); - if (mm->context == mfsp(3)) { - for (vma = mm->mmap; vma; vma = vma->vm_next) - flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end); - preempt_enable(); - return; - } - for (vma = mm->mmap; vma; vma = vma->vm_next) flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end); - preempt_enable(); } void flush_cache_range(struct vm_area_struct *vma, @@ -599,29 +598,21 @@ void flush_cache_range(struct vm_area_struct *vma, { if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && end - start >= parisc_cache_flush_threshold) { - if (vma->vm_mm->context) + if (vma->vm_mm->context.space_id) flush_tlb_range(vma, start, end); flush_cache_all(); return; } - preempt_disable(); - if (vma->vm_mm->context == mfsp(3)) { - flush_user_cache_tlb(vma, start, end); - preempt_enable(); - return; - } - - flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end); - preempt_enable(); + flush_cache_pages(vma, vma->vm_mm, start, end); } void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { if (pfn_valid(pfn)) { - if (likely(vma->vm_mm->context)) { - flush_tlb_page(vma, vmaddr); + flush_tlb_page(vma, vmaddr); + if (likely(vma->vm_mm->context.space_id)) { __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } else { __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); @@ -633,6 +624,7 @@ void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; + unsigned long flags, physaddr; if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && (unsigned long)size >= parisc_cache_flush_threshold) { @@ -641,8 +633,14 @@ void flush_kernel_vmap_range(void *vaddr, int size) return; } - flush_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + while (start < end) { + physaddr = lpa(start); + purge_tlb_start(flags); + pdtlb(SR_KERNEL, start); + purge_tlb_end(flags); + flush_dcache_page_asm(physaddr, start); + start += PAGE_SIZE; + } } EXPORT_SYMBOL(flush_kernel_vmap_range); @@ -650,6 +648,7 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; + unsigned long flags, physaddr; if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && (unsigned long)size >= parisc_cache_flush_threshold) { @@ -658,7 +657,13 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) return; } - purge_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + while (start < end) { + physaddr = lpa(start); + purge_tlb_start(flags); + pdtlb(SR_KERNEL, start); + purge_tlb_end(flags); + purge_dcache_page_asm(physaddr, start); + start += PAGE_SIZE; + } } EXPORT_SYMBOL(invalidate_kernel_vmap_range); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 6e9cdb269862..ecf50159359e 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1288,74 +1288,12 @@ nadtlb_check_alias_20: nadtlb_emulate: /* - * Non access misses can be caused by fdc,fic,pdc,lpa,probe and - * probei instructions. We don't want to fault for these - * instructions (not only does it not make sense, it can cause - * deadlocks, since some flushes are done with the mmap - * semaphore held). 
If the translation doesn't exist, we can't - * insert a translation, so have to emulate the side effects - * of the instruction. Since we don't insert a translation - * we can get a lot of faults during a flush loop, so it makes - * sense to try to do it here with minimum overhead. We only - * emulate fdc,fic,pdc,probew,prober instructions whose base - * and index registers are not shadowed. We defer everything - * else to the "slow" path. + * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and + * probei instructions. The kernel no longer faults doing flushes. + * Use of lpa and probe instructions is rare. Given the issue + * with shadow registers, we defer everything to the "slow" path. */ - - mfctl %cr19,%r9 /* Get iir */ - - /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits. - Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ - - /* Checks for fdc,fdce,pdc,"fic,4f" only */ - ldi 0x280,%r16 - and %r9,%r16,%r17 - cmpb,<>,n %r16,%r17,nadtlb_probe_check - bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */ - BL get_register,%r25 - extrw,u %r9,15,5,%r8 /* Get index register # */ - cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ - copy %r1,%r24 - BL get_register,%r25 - extrw,u %r9,10,5,%r8 /* Get base register # */ - cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ - BL set_register,%r25 - add,l %r1,%r24,%r1 /* doesn't affect c/b bits */ - -nadtlb_nullify: - mfctl %ipsw,%r8 - ldil L%PSW_N,%r9 - or %r8,%r9,%r8 /* Set PSW_N */ - mtctl %r8,%ipsw - - rfir - nop - - /* - When there is no translation for the probe address then we - must nullify the insn and return zero in the target register. - This will indicate to the calling code that it does not have - write/read privileges to this address. - - This should technically work for prober and probew in PA 1.1, - and also probe,r and probe,w in PA 2.0 - - WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN! - THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET. - - */ -nadtlb_probe_check: - ldi 0x80,%r16 - and %r9,%r16,%r17 - cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/ - BL get_register,%r25 /* Find the target register */ - extrw,u %r9,31,5,%r8 /* Get target register */ - cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ - BL set_register,%r25 - copy %r0,%r1 /* Write zero to target register */ - b nadtlb_nullify /* Nullify return insn */ - nop - + b,n nadtlb_fault #ifdef CONFIG_64BIT itlb_miss_20w: diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c index e2bdb5a5f93e..3343d2fb7889 100644 --- a/arch/parisc/kernel/kprobes.c +++ b/arch/parisc/kernel/kprobes.c @@ -5,6 +5,7 @@ * PA-RISC kprobes implementation * * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + * Copyright (c) 2022 Helge Deller <deller@gmx.de> */ #include <linux/types.h> @@ -25,9 +26,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if (!p->ainsn.insn) return -ENOMEM; - memcpy(p->ainsn.insn, p->addr, - MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + /* + * Set up new instructions. Second break instruction will + * trigger call of parisc_kprobe_ss_handler(). 
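The resulting out-of-line slot, reconstructed from the code above and the new handle_break() dispatch later in this diff (a reconstruction, not literal patch content):

/*
 *   ainsn.insn[0] = p->opcode;                    copy of the probed insn
 *   ainsn.insn[1] = PARISC_KPROBES_BREAK_INSN2;   0x3ff801e
 *
 * setup_singlestep() now simply points the instruction queue at insn[0];
 * once the original instruction has executed, the second break traps into
 * handle_break(), which calls parisc_kprobe_ss_handler().  This replaces
 * the old PSW recovery-counter single stepping (the removed mtctl/PSW_R
 * code and the recovery-counter hook in handle_interruption()).
 */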
+ */ p->opcode = *p->addr; + p->ainsn.insn[0] = p->opcode; + p->ainsn.insn[1] = PARISC_KPROBES_BREAK_INSN2; + flush_insn_slot(p); return 0; } @@ -73,9 +79,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, { kcb->iaoq[0] = regs->iaoq[0]; kcb->iaoq[1] = regs->iaoq[1]; - regs->iaoq[0] = (unsigned long)p->ainsn.insn; - mtctl(0, 0); - regs->gr[0] |= PSW_R; + instruction_pointer_set(regs, (unsigned long)p->ainsn.insn); } int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs) @@ -165,9 +169,8 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs) regs->iaoq[0] = kcb->iaoq[1]; break; default: - regs->iaoq[1] = kcb->iaoq[0]; - regs->iaoq[1] += (regs->iaoq[1] - regs->iaoq[0]) + 4; regs->iaoq[0] = kcb->iaoq[1]; + regs->iaoq[1] = regs->iaoq[0] + 4; break; } kcb->kprobe_status = KPROBE_HIT_SSDONE; @@ -191,14 +194,17 @@ static struct kprobe trampoline_p = { static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - unsigned long orig_ret_address; - - orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); - instruction_pointer_set(regs, orig_ret_address); + __kretprobe_trampoline_handler(regs, NULL); return 1; } +void arch_kretprobe_fixup_return(struct pt_regs *regs, + kprobe_opcode_t *correct_ret_addr) +{ + regs->gr[2] = (unsigned long)correct_ret_addr; +} + void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 36a57aa38e87..160996f2198e 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -91,7 +91,7 @@ static inline int map_pte_uncached(pte_t * pte, printk(KERN_ERR "map_pte_uncached: page already exists\n"); purge_tlb_start(flags); set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); - pdtlb_kernel(orig_vaddr); + pdtlb(SR_KERNEL, orig_vaddr); purge_tlb_end(flags); vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE; @@ -175,7 +175,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, pte_clear(&init_mm, vaddr, pte); purge_tlb_start(flags); - pdtlb_kernel(orig_vaddr); + pdtlb(SR_KERNEL, orig_vaddr); purge_tlb_end(flags); vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE; diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 46b1050640b8..24443908f905 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -1,16 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* - * linux/arch/parisc/kernel/signal.c: Architecture-specific signal - * handling support. + * PA-RISC architecture-specific signal handling support. * * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org> * Copyright (C) 2000 Linuxcare, Inc. + * Copyright (C) 2000-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> * * Based on the ia64, i386, and alpha versions. - * - * Like the IA-64, we are a recent enough port (we are *starting* - * with glibc2.2) that we do not need to support the old non-realtime - * Linux signals. Therefore we don't. */ #include <linux/sched.h> @@ -32,6 +29,7 @@ #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/asm-offsets.h> +#include <asm/vdso.h> #ifdef CONFIG_COMPAT #include "signal32.h" @@ -59,14 +57,6 @@ * Do a signal return - restore sigcontext. 
*/ -/* Trampoline for calling rt_sigreturn() */ -#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */ -#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */ -#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */ -#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */ -/* For debugging */ -#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */ - static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { @@ -77,9 +67,9 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq)); err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq)); err |= __get_user(regs->sar, &sc->sc_sar); - DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n", - regs->iaoq[0],regs->iaoq[1]); - DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]); + DBG(2, "%s: iaoq is %#lx / %#lx\n", + __func__, regs->iaoq[0], regs->iaoq[1]); + DBG(2, "%s: r28 is %ld\n", __func__, regs->gr[28]); return err; } @@ -102,7 +92,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) /* Unwind the user stack to get the rt_sigframe structure. */ frame = (struct rt_sigframe __user *) (usp - sigframe_size); - DBG(2,"sys_rt_sigreturn: frame is %p\n", frame); + DBG(2, "%s: frame is %p pid %d\n", __func__, frame, task_pid_nr(current)); regs->orig_r28 = 1; /* no restarts for sigreturn */ @@ -110,7 +100,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) compat_frame = (struct compat_rt_sigframe __user *)frame; if (is_compat_task()) { - DBG(2,"sys_rt_sigreturn: ELF32 process.\n"); if (get_compat_sigset(&set, &compat_frame->uc.uc_sigmask)) goto give_sigsegv; } else @@ -125,25 +114,25 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) /* Good thing we saved the old gr[30], eh? */ #ifdef CONFIG_64BIT if (is_compat_task()) { - DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n", - &compat_frame->uc.uc_mcontext); + DBG(1, "%s: compat_frame->uc.uc_mcontext 0x%p\n", + __func__, &compat_frame->uc.uc_mcontext); // FIXME: Load upper half from register file if (restore_sigcontext32(&compat_frame->uc.uc_mcontext, &compat_frame->regs, regs)) goto give_sigsegv; - DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", - usp, &compat_frame->uc.uc_stack); + DBG(1, "%s: usp %#08lx stack 0x%p\n", + __func__, usp, &compat_frame->uc.uc_stack); if (compat_restore_altstack(&compat_frame->uc.uc_stack)) goto give_sigsegv; } else #endif { - DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n", - &frame->uc.uc_mcontext); + DBG(1, "%s: frame->uc.uc_mcontext 0x%p\n", + __func__, &frame->uc.uc_mcontext); if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) goto give_sigsegv; - DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", - usp, &frame->uc.uc_stack); + DBG(1, "%s: usp %#08lx stack 0x%p\n", + __func__, usp, &frame->uc.uc_stack); if (restore_altstack(&frame->uc.uc_stack)) goto give_sigsegv; } @@ -155,14 +144,11 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) */ if (in_syscall) regs->gr[31] = regs->iaoq[0]; -#if DEBUG_SIG - DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]); - show_regs(regs); -#endif + return; give_sigsegv: - DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n"); + DBG(1, "%s: Sending SIGSEGV\n", __func__); force_sig(SIGSEGV); return; } @@ -177,15 +163,15 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) /*FIXME: ELF32 vs. 
ELF64 has different frame_size, but since we don't use the parameter it doesn't matter */ - DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", - (unsigned long)ka, sp, frame_size); + DBG(1, "%s: ka = %#lx, sp = %#lx, frame_size = %zu\n", + __func__, (unsigned long)ka, sp, frame_size); /* Align alternate stack and reserve 64 bytes for the signal handler's frame marker. */ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ - DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); + DBG(1, "%s: Returning sp = %#lx\n", __func__, (unsigned long)sp); return (void __user *) sp; /* Stacks grow up. Fun. */ } @@ -205,20 +191,20 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_sysc err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]); err |= __put_user(regs->sr[3], &sc->sc_iasq[0]); err |= __put_user(regs->sr[3], &sc->sc_iasq[1]); - DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n", - regs->gr[31], regs->gr[31]+4); + DBG(1, "%s: iaoq %#lx / %#lx (in syscall)\n", + __func__, regs->gr[31], regs->gr[31]+4); } else { err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq)); err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq)); - DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n", - regs->iaoq[0], regs->iaoq[1]); + DBG(1, "%s: iaoq %#lx / %#lx (not in syscall)\n", + __func__, regs->iaoq[0], regs->iaoq[1]); } err |= __put_user(flags, &sc->sc_flags); err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr)); err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr)); err |= __put_user(regs->sar, &sc->sc_sar); - DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]); + DBG(1, "%s: r28 is %ld\n", __func__, regs->gr[28]); return err; } @@ -230,7 +216,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, struct rt_sigframe __user *frame; unsigned long rp, usp; unsigned long haddr, sigframe_size; - unsigned long start, end; + unsigned long start; int err = 0; #ifdef CONFIG_64BIT struct compat_rt_sigframe __user * compat_frame; @@ -247,8 +233,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, #endif frame = get_sigframe(&ksig->ka, usp, sigframe_size); - DBG(1,"SETUP_RT_FRAME: START\n"); - DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info); + DBG(1, "%s: frame %p info %p\n", __func__, frame, &ksig->info); start = (unsigned long) frame; if (start >= user_addr_max() - sigframe_size) @@ -259,11 +244,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, compat_frame = (struct compat_rt_sigframe __user *)frame; if (is_compat_task()) { - DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info); + DBG(1, "%s: frame->info = 0x%p\n", __func__, &compat_frame->info); err |= copy_siginfo_to_user32(&compat_frame->info, &ksig->info); err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]); - DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc); - DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext); + DBG(1, "%s: frame->uc = 0x%p\n", __func__, &compat_frame->uc); + DBG(1, "%s: frame->uc.uc_mcontext = 0x%p\n", + __func__, &compat_frame->uc.uc_mcontext); err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext, &compat_frame->regs, regs, in_syscall); err |= put_compat_sigset(&compat_frame->uc.uc_sigmask, set, @@ -271,11 +257,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs 
*regs, } else #endif { - DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info); + DBG(1, "%s: frame->info = 0x%p\n", __func__, &frame->info); err |= copy_siginfo_to_user(&frame->info, &ksig->info); err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]); - DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc); - DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext); + DBG(1, "%s: frame->uc = 0x%p\n", __func__, &frame->uc); + DBG(1, "%s: frame->uc.uc_mcontext = 0x%p\n", + __func__, &frame->uc.uc_mcontext); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall); /* FIXME: Should probably be converted as well for the compat case */ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); @@ -284,32 +271,15 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, if (err) return -EFAULT; - /* Set up to return from userspace. If provided, use a stub - already in userspace. The first words of tramp are used to - save the previous sigrestartblock trampoline that might be - on the stack. We start the sigreturn trampoline at - SIGRESTARTBLOCK_TRAMP+X. */ - err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]); - err |= __put_user(INSN_LDI_R20, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]); - err |= __put_user(INSN_BLE_SR2_R0, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]); - err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]); - - start = (unsigned long) &frame->tramp[0]; - end = (unsigned long) &frame->tramp[TRAMP_SIZE]; - flush_user_dcache_range_asm(start, end); - flush_user_icache_range_asm(start, end); - - /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP - * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP - * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP - */ - rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; +#ifdef CONFIG_64BIT + if (!is_compat_task()) + rp = VDSO64_SYMBOL(current, sigtramp_rt); + else +#endif + rp = VDSO32_SYMBOL(current, sigtramp_rt); - if (err) - return -EFAULT; + if (in_syscall) + rp += 4*4; /* skip 4 instructions and start at ldi 1,%r25 */ haddr = A(ksig->ka.sa.sa_handler); /* The sa_handler may be a pointer to a function descriptor */ @@ -340,8 +310,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, haddr = fdesc.addr; regs->gr[19] = fdesc.gp; - DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n", - haddr, regs->gr[19], in_syscall); + DBG(1, "%s: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n", + __func__, haddr, regs->gr[19], in_syscall); } #endif @@ -351,7 +321,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, regs->gr[31] = haddr; #ifdef CONFIG_64BIT if (!test_thread_flag(TIF_32BIT)) - sigframe_size |= 1; + sigframe_size |= 1; /* XXX ???? 
*/ #endif } else { unsigned long psw = USER_PSW; @@ -373,11 +343,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, } regs->gr[0] = psw; - regs->iaoq[0] = haddr | 3; + regs->iaoq[0] = haddr | PRIV_USER; regs->iaoq[1] = regs->iaoq[0] + 4; } - regs->gr[2] = rp; /* userland return pointer */ + regs->gr[2] = rp; /* userland return pointer */ regs->gr[26] = ksig->sig; /* signal number */ #ifdef CONFIG_64BIT @@ -391,15 +361,15 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, regs->gr[24] = A(&frame->uc); /* ucontext pointer */ } - DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n", + DBG(1, "%s: making sigreturn frame: %#lx + %#lx = %#lx\n", __func__, regs->gr[30], sigframe_size, regs->gr[30] + sigframe_size); /* Raise the user stack pointer to make a proper call frame. */ regs->gr[30] = (A(frame) + sigframe_size); - DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n", - current->comm, current->pid, frame, regs->gr[30], + DBG(1, "%s: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n", + __func__, current->comm, current->pid, frame, regs->gr[30], regs->iaoq[0], regs->iaoq[1], rp); return 0; @@ -415,8 +385,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall) int ret; sigset_t *oldset = sigmask_to_save(); - DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", - ksig->sig, ksig->ka, ksig->info, oldset, regs); + DBG(1, "%s: sig=%d, ka=%p, info=%p, oldset=%p, regs=%p\n", + __func__, ksig->sig, &ksig->ka, &ksig->info, oldset, regs); /* Set up the stack frame */ ret = setup_rt_frame(ksig, oldset, regs, in_syscall); @@ -424,8 +394,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall) signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP) || test_thread_flag(TIF_BLOCKSTEP)); - DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", - regs->gr[28]); + DBG(1, "%s: Exit (success), regs->gr[28] = %ld\n", + __func__, regs->gr[28]); } /* @@ -483,21 +453,27 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) if (regs->orig_r28) return; regs->orig_r28 = 1; /* no more restarts */ + + DBG(1, "%s: orig_r28 = %ld pid %d r20 %ld\n", + __func__, regs->orig_r28, task_pid_nr(current), regs->gr[20]); + /* Check the return code */ switch (regs->gr[28]) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: - DBG(1,"ERESTARTNOHAND: returning -EINTR\n"); + DBG(1, "%s: ERESTARTNOHAND: returning -EINTR\n", __func__); regs->gr[28] = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { - DBG(1,"ERESTARTSYS: putting -EINTR\n"); + DBG(1, "%s: ERESTARTSYS: putting -EINTR pid %d\n", + __func__, task_pid_nr(current)); regs->gr[28] = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: + DBG(1, "%s: %ld\n", __func__, regs->gr[28]); check_syscallno_in_delay_branch(regs); break; } @@ -509,50 +485,52 @@ insert_restart_trampoline(struct pt_regs *regs) if (regs->orig_r28) return; regs->orig_r28 = 1; /* no more restarts */ - switch(regs->gr[28]) { + + DBG(2, "%s: gr28 = %ld pid %d\n", + __func__, regs->gr[28], task_pid_nr(current)); + + switch (regs->gr[28]) { case -ERESTART_RESTARTBLOCK: { /* Restart the system call - no handlers present */ unsigned int *usp = (unsigned int *)regs->gr[30]; - unsigned long start = (unsigned long) &usp[2]; - unsigned long end = (unsigned long) &usp[5]; + unsigned long rp; long err = 0; /* check that we don't exceed the stack */ if (A(&usp[0]) >= user_addr_max() - 5 * 
sizeof(int)) return; - /* Setup a trampoline to restart the syscall - * with __NR_restart_syscall + /* Call trampoline in vdso to restart the syscall + * with __NR_restart_syscall. + * Original return addresses are on stack like this: * * 0: <return address (orig r31)> * 4: <2nd half for 64-bit> - * 8: ldw 0(%sp), %r31 - * 12: be 0x100(%sr2, %r0) - * 16: ldi __NR_restart_syscall, %r20 */ #ifdef CONFIG_64BIT - err |= put_user(regs->gr[31] >> 32, &usp[0]); - err |= put_user(regs->gr[31] & 0xffffffff, &usp[1]); - err |= put_user(0x0fc010df, &usp[2]); -#else - err |= put_user(regs->gr[31], &usp[0]); - err |= put_user(0x0fc0109f, &usp[2]); + if (!is_compat_task()) { + err |= put_user(regs->gr[31] >> 32, &usp[0]); + err |= put_user(regs->gr[31] & 0xffffffff, &usp[1]); + rp = VDSO64_SYMBOL(current, restart_syscall); + } else #endif - err |= put_user(0xe0008200, &usp[3]); - err |= put_user(0x34140000, &usp[4]); - + { + err |= put_user(regs->gr[31], &usp[0]); + rp = VDSO32_SYMBOL(current, restart_syscall); + } WARN_ON(err); - /* flush data/instruction cache for new insns */ - flush_user_dcache_range_asm(start, end); - flush_user_icache_range_asm(start, end); - - regs->gr[31] = regs->gr[30] + 8; + regs->gr[31] = rp; + DBG(1, "%s: ERESTART_RESTARTBLOCK\n", __func__); return; } + case -EINTR: + /* ok, was handled before and should be returned. */ + break; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: + DBG(1, "%s: Type %ld\n", __func__, regs->gr[28]); check_syscallno_in_delay_branch(regs); return; default: @@ -567,30 +545,35 @@ insert_restart_trampoline(struct pt_regs *regs) * registers). As noted below, the syscall number gets restored for * us due to the magic of delayed branching. */ -asmlinkage void -do_signal(struct pt_regs *regs, long in_syscall) +static void do_signal(struct pt_regs *regs, long in_syscall) { struct ksignal ksig; + int restart_syscall; + bool has_handler; - DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n", - regs, regs->sr[7], in_syscall); + has_handler = get_signal(&ksig); - if (get_signal(&ksig)) { - DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); + restart_syscall = 0; + if (in_syscall) + restart_syscall = 1; + + if (has_handler) { /* Restart a system call if necessary. */ - if (in_syscall) + if (restart_syscall) syscall_restart(regs, &ksig.ka); handle_signal(&ksig, regs, in_syscall); + DBG(1, "%s: Handled signal pid %d\n", + __func__, task_pid_nr(current)); return; } - /* Did we come from a system call? */ - if (in_syscall) + /* Do we need to restart the system call? 
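The two vDSO trampolines used above replace instructions previously written onto the user stack (and the cache flushes that required). A sketch of the sigtramp_rt layout implied by the removed INSN_* defines and the "skip 4 instructions" adjustment (the actual vdso32/sigtramp.S is not part of this excerpt):

/*
 *   sigtramp_rt:       ldi 0,%r25                   entry if !in_syscall
 *                      ldi __NR_rt_sigreturn,%r20
 *                      ble 0x100(%sr2,%r0)
 *                      nop
 *   sigtramp_rt+16:    ldi 1,%r25                   entry if in_syscall
 *                      ldi __NR_rt_sigreturn,%r20
 *                      ble 0x100(%sr2,%r0)
 *                      nop
 */

Likewise, restart_syscall reloads the saved return address from the stack words written above and issues __NR_restart_syscall, as described in the removed stack-trampoline comment.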
*/ + if (restart_syscall) insert_restart_trampoline(regs); - DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", - regs->gr[28]); + DBG(1, "%s: Exit (not delivered), regs->gr[28] = %ld orig_r28 = %ld pid %d\n", + __func__, regs->gr[28], regs->orig_r28, task_pid_nr(current)); restore_saved_sigmask(); } diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h index f166250f2d06..c03eb1ed4c53 100644 --- a/arch/parisc/kernel/signal32.h +++ b/arch/parisc/kernel/signal32.h @@ -36,21 +36,12 @@ struct compat_regfile { compat_int_t rf_sar; }; -#define COMPAT_SIGRETURN_TRAMP 4 -#define COMPAT_SIGRESTARTBLOCK_TRAMP 5 -#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \ - COMPAT_SIGRESTARTBLOCK_TRAMP) - struct compat_rt_sigframe { - /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c - Secondary to that it must protect the ERESTART_RESTARTBLOCK - trampoline we left on the stack (we were bad and didn't - change sp so we could run really fast.) */ - compat_uint_t tramp[COMPAT_TRAMP_SIZE]; - compat_siginfo_t info; - struct compat_ucontext uc; - /* Hidden location of truncated registers, *must* be last. */ - struct compat_regfile regs; + unsigned int tramp[2]; /* holds original return address */ + compat_siginfo_t info; + struct compat_ucontext uc; + /* Hidden location of truncated registers, *must* be last. */ + struct compat_regfile regs; }; /* diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c index 0a10e4ddc528..e88a6ce7c96d 100644 --- a/arch/parisc/kernel/topology.c +++ b/arch/parisc/kernel/topology.c @@ -101,8 +101,8 @@ void __init store_cpu_topology(unsigned int cpuid) update_siblings_masks(cpuid); - pr_info("CPU%u: thread %d, cpu %d, socket %d\n", - cpuid, cpu_topology[cpuid].thread_id, + pr_info("CPU%u: cpu core %d of socket %d\n", + cpuid, cpu_topology[cpuid].core_id, cpu_topology[cpuid].socket_id); } diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index b6fdebddc8e9..a6e61cf2cad0 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -302,7 +302,10 @@ static void handle_break(struct pt_regs *regs) parisc_kprobe_break_handler(regs); return; } - + if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) { + parisc_kprobe_ss_handler(regs); + return; + } #endif #ifdef CONFIG_KGDB @@ -539,11 +542,6 @@ void notrace handle_interruption(int code, struct pt_regs *regs) /* Recovery counter trap */ regs->gr[0] &= ~PSW_R; -#ifdef CONFIG_KPROBES - if (parisc_kprobe_ss_handler(regs)) - return; -#endif - #ifdef CONFIG_KGDB if (kgdb_single_step) { kgdb_handle_exception(0, SIGTRAP, 0, regs); @@ -662,6 +660,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) by hand. 
Technically we need to emulate: fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ + if (code == 17 && handle_nadtlb_fault(regs)) + return; fault_address = regs->ior; fault_space = regs->isr; break; diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 237d20dd5622..ed1e88a74dc4 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -3,14 +3,11 @@ * Unaligned memory access handler * * Copyright (C) 2001 Randolph Chung <tausq@debian.org> + * Copyright (C) 2022 Helge Deller <deller@gmx.de> * Significantly tweaked by LaMont Jones <lamont@debian.org> */ -#include <linux/jiffies.h> -#include <linux/kernel.h> -#include <linux/module.h> #include <linux/sched/signal.h> -#include <linux/sched/debug.h> #include <linux/signal.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> @@ -25,18 +22,7 @@ #define DPRINTF(fmt, args...) #endif -#ifdef CONFIG_64BIT -#define RFMT "%016lx" -#else -#define RFMT "%08lx" -#endif - -#define FIXUP_BRANCH(lbl) \ - "\tldil L%%" #lbl ", %%r1\n" \ - "\tldo R%%" #lbl "(%%r1), %%r1\n" \ - "\tbv,n %%r0(%%r1)\n" -/* If you use FIXUP_BRANCH, then you must list this clobber */ -#define FIXUP_BRANCH_CLOBBER "r1" +#define RFMT "%#08lx" /* 1111 1100 0000 0000 0001 0011 1100 0000 */ #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) @@ -114,37 +100,30 @@ #define IM14(i) IM((i),14) #define ERR_NOTHANDLED -1 -#define ERR_PAGEFAULT -2 int unaligned_enabled __read_mostly = 1; static int emulate_ldh(struct pt_regs *regs, int toreg) { unsigned long saddr = regs->ior; - unsigned long val = 0; - int ret; + unsigned long val = 0, temp1; + ASM_EXCEPTIONTABLE_VAR(ret); DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n", regs->isr, regs->ior, toreg); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" -"1: ldbs 0(%%sr1,%3), %%r20\n" +"1: ldbs 0(%%sr1,%3), %2\n" "2: ldbs 1(%%sr1,%3), %0\n" -" depw %%r20, 23, 24, %0\n" -" copy %%r0, %1\n" +" depw %2, 23, 24, %0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %1\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b, 4b) - ASM_EXCEPTIONTABLE_ENTRY(2b, 4b) - : "=r" (val), "=r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : "r20", FIXUP_BRANCH_CLOBBER ); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + : "+r" (val), "+r" (ret), "=&r" (temp1) + : "r" (saddr), "r" (regs->isr) ); - DPRINTF("val = 0x" RFMT "\n", val); + DPRINTF("val = " RFMT "\n", val); if (toreg) regs->gr[toreg] = val; @@ -155,34 +134,28 @@ static int emulate_ldh(struct pt_regs *regs, int toreg) static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; - unsigned long val = 0; - int ret; + unsigned long val = 0, temp1, temp2; + ASM_EXCEPTIONTABLE_VAR(ret); DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n", regs->isr, regs->ior, toreg); __asm__ __volatile__ ( -" zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */ -" mtsp %4, %%sr1\n" -" depw %%r0,31,2,%3\n" -"1: ldw 0(%%sr1,%3),%0\n" -"2: ldw 4(%%sr1,%3),%%r20\n" -" subi 32,%%r19,%%r19\n" -" mtctl %%r19,11\n" -" vshd %0,%%r20,%0\n" -" copy %%r0, %1\n" +" zdep %4,28,2,%2\n" /* r19=(ofs&3)*8 */ +" mtsp %5, %%sr1\n" +" depw %%r0,31,2,%4\n" +"1: ldw 0(%%sr1,%4),%0\n" +"2: ldw 4(%%sr1,%4),%3\n" +" subi 32,%4,%2\n" +" mtctl %2,11\n" +" vshd %0,%3,%0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %1\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b, 4b) - ASM_EXCEPTIONTABLE_ENTRY(2b, 4b) - : "=r" (val), "=r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : 
"r19", "r20", FIXUP_BRANCH_CLOBBER ); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + : "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2) + : "r" (saddr), "r" (regs->isr) ); - DPRINTF("val = 0x" RFMT "\n", val); + DPRINTF("val = " RFMT "\n", val); if (flop) ((__u32*)(regs->fr))[toreg] = val; @@ -195,16 +168,15 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; __u64 val = 0; - int ret; + ASM_EXCEPTIONTABLE_VAR(ret); DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n", regs->isr, regs->ior, toreg); -#ifdef CONFIG_PA20 -#ifndef CONFIG_64BIT - if (!flop) - return -1; -#endif + if (!IS_ENABLED(CONFIG_64BIT) && !flop) + return ERR_NOTHANDLED; + +#ifdef CONFIG_64BIT __asm__ __volatile__ ( " depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ " mtsp %4, %%sr1\n" @@ -214,44 +186,32 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) " subi 64,%%r19,%%r19\n" " mtsar %%r19\n" " shrpd %0,%%r20,%%sar,%0\n" -" copy %%r0, %1\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %1\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,4b) - ASM_EXCEPTIONTABLE_ENTRY(2b,4b) - : "=r" (val), "=r" (ret) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + : "=r" (val), "+r" (ret) : "0" (val), "r" (saddr), "r" (regs->isr) - : "r19", "r20", FIXUP_BRANCH_CLOBBER ); + : "r19", "r20" ); #else { - unsigned long valh=0,vall=0; + unsigned long shift, temp1; __asm__ __volatile__ ( -" zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */ -" mtsp %6, %%sr1\n" -" dep %%r0,31,2,%5\n" -"1: ldw 0(%%sr1,%5),%0\n" -"2: ldw 4(%%sr1,%5),%1\n" -"3: ldw 8(%%sr1,%5),%%r20\n" -" subi 32,%%r19,%%r19\n" -" mtsar %%r19\n" -" vshd %0,%1,%0\n" -" vshd %1,%%r20,%1\n" -" copy %%r0, %2\n" +" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */ +" mtsp %5, %%sr1\n" +" dep %%r0,31,2,%2\n" +"1: ldw 0(%%sr1,%2),%0\n" +"2: ldw 4(%%sr1,%2),%R0\n" +"3: ldw 8(%%sr1,%2),%4\n" +" subi 32,%3,%3\n" +" mtsar %3\n" +" vshd %0,%R0,%0\n" +" vshd %R0,%4,%R0\n" "4: \n" -" .section .fixup,\"ax\"\n" -"5: ldi -2, %2\n" - FIXUP_BRANCH(4b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,5b) - ASM_EXCEPTIONTABLE_ENTRY(2b,5b) - ASM_EXCEPTIONTABLE_ENTRY(3b,5b) - : "=r" (valh), "=r" (vall), "=r" (ret) - : "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr) - : "r19", "r20", FIXUP_BRANCH_CLOBBER ); - val=((__u64)valh<<32)|(__u64)vall; + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b) + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); } #endif @@ -267,31 +227,25 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) static int emulate_sth(struct pt_regs *regs, int frreg) { - unsigned long val = regs->gr[frreg]; - int ret; + unsigned long val = regs->gr[frreg], temp1; + ASM_EXCEPTIONTABLE_VAR(ret); if (!frreg) val = 0; - DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, + DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, val, regs->isr, regs->ior); __asm__ __volatile__ ( -" mtsp %3, %%sr1\n" -" extrw,u %1, 23, 8, %%r19\n" -"1: stb %1, 1(%%sr1, %2)\n" -"2: stb %%r19, 0(%%sr1, %2)\n" -" copy %%r0, %0\n" +" mtsp %4, %%sr1\n" +" extrw,u %2, 23, 8, %1\n" +"1: stb %1, 0(%%sr1, %3)\n" +"2: stb %2, 1(%%sr1, %3)\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %0\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,4b) - ASM_EXCEPTIONTABLE_ENTRY(2b,4b) - : 
"=r" (ret) - : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", FIXUP_BRANCH_CLOBBER ); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + : "+r" (ret), "=&r" (temp1) + : "r" (val), "r" (regs->ior), "r" (regs->isr) ); return ret; } @@ -299,7 +253,7 @@ static int emulate_sth(struct pt_regs *regs, int frreg) static int emulate_stw(struct pt_regs *regs, int frreg, int flop) { unsigned long val; - int ret; + ASM_EXCEPTIONTABLE_VAR(ret); if (flop) val = ((__u32*)(regs->fr))[frreg]; @@ -308,7 +262,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) else val = 0; - DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, + DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, val, regs->isr, regs->ior); @@ -328,24 +282,19 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) " or %%r1, %%r21, %%r21\n" " stw %%r20,0(%%sr1,%2)\n" " stw %%r21,4(%%sr1,%2)\n" -" copy %%r0, %0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %0\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,4b) - ASM_EXCEPTIONTABLE_ENTRY(2b,4b) - : "=r" (ret) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + : "+r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); + : "r19", "r20", "r21", "r22", "r1" ); - return 0; + return ret; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { __u64 val; - int ret; + ASM_EXCEPTIONTABLE_VAR(ret); if (flop) val = regs->fr[frreg]; @@ -357,11 +306,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg, val, regs->isr, regs->ior); -#ifdef CONFIG_PA20 -#ifndef CONFIG_64BIT - if (!flop) - return -1; -#endif + if (!IS_ENABLED(CONFIG_64BIT) && !flop) + return ERR_NOTHANDLED; + +#ifdef CONFIG_64BIT __asm__ __volatile__ ( " mtsp %3, %%sr1\n" " depd,z %2, 60, 3, %%r19\n" @@ -378,26 +326,21 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " or %%r1, %%r21, %%r21\n" "3: std %%r20,0(%%sr1,%2)\n" "4: std %%r21,8(%%sr1,%2)\n" -" copy %%r0, %0\n" "5: \n" -" .section .fixup,\"ax\"\n" -"6: ldi -2, %0\n" - FIXUP_BRANCH(5b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,6b) - ASM_EXCEPTIONTABLE_ENTRY(2b,6b) - ASM_EXCEPTIONTABLE_ENTRY(3b,6b) - ASM_EXCEPTIONTABLE_ENTRY(4b,6b) - : "=r" (ret) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b) + : "+r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); + : "r19", "r20", "r21", "r22", "r1" ); #else { unsigned long valh=(val>>32),vall=(val&0xffffffffl); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" -" dep %%r0, 31, 2, %2\n" +" dep %%r0, 31, 2, %3\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" "1: ldw 0(%%sr1,%3),%%r20\n" @@ -409,23 +352,18 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" " or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%1)\n" +"3: stw %1,0(%%sr1,%3)\n" "4: stw %%r1,4(%%sr1,%3)\n" "5: stw %2,8(%%sr1,%3)\n" -" copy %%r0, %0\n" "6: \n" -" .section .fixup,\"ax\"\n" -"7: ldi -2, %0\n" - FIXUP_BRANCH(6b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,7b) - ASM_EXCEPTIONTABLE_ENTRY(2b,7b) - ASM_EXCEPTIONTABLE_ENTRY(3b,7b) - 
ASM_EXCEPTIONTABLE_ENTRY(4b,7b) - ASM_EXCEPTIONTABLE_ENTRY(5b,7b) - : "=r" (ret) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b) + : "+r" (ret) : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER ); + : "r19", "r20", "r21", "r1" ); } #endif @@ -438,7 +376,6 @@ void handle_unaligned(struct pt_regs *regs) unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; int modify = 0; int ret = ERR_NOTHANDLED; - register int flop=0; /* true if this is a flop */ __inc_irq_stat(irq_unaligned_count); @@ -450,10 +387,10 @@ void handle_unaligned(struct pt_regs *regs) if (!(current->thread.flags & PARISC_UAC_NOPRINT) && __ratelimit(&ratelimit)) { - char buf[256]; - sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", - current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); - printk(KERN_WARNING "%s", buf); + printk(KERN_WARNING "%s(%d): unaligned access to " RFMT + " at ip " RFMT " (iir " RFMT ")\n", + current->comm, task_pid_nr(current), regs->ior, + regs->iaoq[0], regs->iir); #ifdef DEBUG_UNALIGNED show_regs(regs); #endif @@ -547,7 +484,7 @@ void handle_unaligned(struct pt_regs *regs) ret = emulate_stw(regs, R2(regs->iir),0); break; -#ifdef CONFIG_PA20 +#ifdef CONFIG_64BIT case OPCODE_LDD_I: case OPCODE_LDDA_I: case OPCODE_LDD_S: @@ -565,13 +502,11 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_FLDWS: case OPCODE_FLDWXR: case OPCODE_FLDWSR: - flop=1; ret = emulate_ldw(regs,FR3(regs->iir),1); break; case OPCODE_FLDDX: case OPCODE_FLDDS: - flop=1; ret = emulate_ldd(regs,R3(regs->iir),1); break; @@ -579,13 +514,11 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_FSTWS: case OPCODE_FSTWXR: case OPCODE_FSTWSR: - flop=1; ret = emulate_stw(regs,FR3(regs->iir),1); break; case OPCODE_FSTDX: case OPCODE_FSTDS: - flop=1; ret = emulate_std(regs,R3(regs->iir),1); break; @@ -596,37 +529,33 @@ void handle_unaligned(struct pt_regs *regs) ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. 
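 *
 * Sketch (not part of the patch): after this rewrite every emulate_*()
 * helper shares one return contract: 0 on success, -EFAULT when an
 * exception-table entry fired on the user access, or ERR_NOTHANDLED when
 * the opcode cannot be emulated.  The tail of handle_unaligned() consumes
 * that value roughly as:
 *
 *   if (!ret)                    // emulated; resume past the insn
 *           return;
 *   if (ret == -EFAULT)          // bad user address
 *           force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *                           (void __user *)regs->ior);
 *   else                         // ERR_NOTHANDLED
 *           ...                  // die_if_kernel()/SIGBUS path below
 *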
*/ break; } -#ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: - flop=1; ret = emulate_ldd(regs,R2(regs->iir),1); break; case OPCODE_FSTD_L: - flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; +#ifdef CONFIG_64BIT case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; - } #endif + } switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: - flop=1; - ret = emulate_ldw(regs, R2(regs->iir),0); + ret = emulate_ldw(regs, R2(regs->iir), 1); break; case OPCODE_LDW_M: - ret = emulate_ldw(regs, R2(regs->iir),1); + ret = emulate_ldw(regs, R2(regs->iir), 0); break; case OPCODE_FSTW_L: - flop=1; ret = emulate_stw(regs, R2(regs->iir),1); break; case OPCODE_STW_M: @@ -673,7 +602,7 @@ void handle_unaligned(struct pt_regs *regs) printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); - if (ret == ERR_PAGEFAULT) + if (ret == -EFAULT) { force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)regs->ior); diff --git a/arch/parisc/kernel/vdso.c b/arch/parisc/kernel/vdso.c new file mode 100644 index 000000000000..63dc44c4c246 --- /dev/null +++ b/arch/parisc/kernel/vdso.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Helge Deller <deller@gmx.de> + * + * based on arch/s390/kernel/vdso.c which is + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/elf.h> +#include <linux/timekeeper_internal.h> +#include <linux/compat.h> +#include <linux/nsproxy.h> +#include <linux/time_namespace.h> +#include <linux/random.h> + +#include <asm/pgtable.h> +#include <asm/page.h> +#include <asm/sections.h> +#include <asm/vdso.h> +#include <asm/cacheflush.h> + +extern char vdso32_start, vdso32_end; +extern char vdso64_start, vdso64_end; + +static int vdso_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *vma) +{ + current->mm->context.vdso_base = vma->vm_start; + return 0; +} + +#ifdef CONFIG_64BIT +static struct vm_special_mapping vdso64_mapping = { + .name = "[vdso]", + .mremap = vdso_mremap, +}; +#endif + +static struct vm_special_mapping vdso32_mapping = { + .name = "[vdso]", + .mremap = vdso_mremap, +}; + +/* + * This is called from binfmt_elf, we create the special vma for the + * vDSO and insert it into the mm struct tree + */ +int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack) +{ + + unsigned long vdso_text_start, vdso_text_len, map_base; + struct vm_special_mapping *vdso_mapping; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int rc; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + +#ifdef CONFIG_64BIT + if (!is_compat_task()) { + vdso_text_len = &vdso64_end - &vdso64_start; + vdso_mapping = &vdso64_mapping; + } else +#endif + { + vdso_text_len = &vdso32_end - &vdso32_start; + vdso_mapping = &vdso32_mapping; + } + + map_base = mm->mmap_base; + if (current->flags & PF_RANDOMIZE) + map_base -= (get_random_int() & 0x1f) * PAGE_SIZE; + + vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0); + + /* VM_MAYWRITE for COW so gdb can set breakpoints */ + vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + vdso_mapping); + if (IS_ERR(vma)) { + do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL); + rc = PTR_ERR(vma); + } else { + 
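		/* Illustrative comment (not in the commit): on success, record
		 * where the vDSO landed so signal delivery can aim the
		 * sigreturn trampoline at it; vdso_mremap() above keeps
		 * vdso_base in sync if userspace later moves the mapping. */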
current->mm->context.vdso_base = vdso_text_start; + rc = 0; + } + + mmap_write_unlock(mm); + return rc; +} + +static struct page ** __init vdso_setup_pages(void *start, void *end) +{ + int pages = (end - start) >> PAGE_SHIFT; + struct page **pagelist; + int i; + + pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL); + if (!pagelist) + panic("%s: Cannot allocate page list for VDSO", __func__); + for (i = 0; i < pages; i++) + pagelist[i] = virt_to_page(start + i * PAGE_SIZE); + return pagelist; +} + +static int __init vdso_init(void) +{ +#ifdef CONFIG_64BIT + vdso64_mapping.pages = vdso_setup_pages(&vdso64_start, &vdso64_end); +#endif + if (IS_ENABLED(CONFIG_COMPAT) || !IS_ENABLED(CONFIG_64BIT)) + vdso32_mapping.pages = vdso_setup_pages(&vdso32_start, &vdso32_end); + return 0; +} +arch_initcall(vdso_init); diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile new file mode 100644 index 000000000000..85b1c6d261d1 --- /dev/null +++ b/arch/parisc/kernel/vdso32/Makefile @@ -0,0 +1,53 @@ +# List of files in the vdso, has to be asm only for now + +obj-vdso32 = note.o sigtramp.o restart_syscall.o + +# Build rules + +targets := $(obj-vdso32) vdso32.so +obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) + +ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls +# -march=1.1 -mschedule=7100LC +ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) +asflags-y := -D__VDSO32__ -s + +KBUILD_AFLAGS += -DBUILD_VDSO +KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING + +VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name) + +obj-y += vdso32_wrapper.o +extra-y += vdso32.lds +CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH) + +$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE + +# Force dependency (incbin is bad) +# link rule for the .so file, .lds has to be first +$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) + $(call if_changed,vdso32ld) + +# assembly rules for the .S files +$(obj-vdso32): %.o: %.S FORCE + $(call if_changed_dep,vdso32as) + +$(obj-cvdso32): %.o: %.c FORCE + $(call if_changed_dep,vdso32cc) + +# actual build commands +quiet_cmd_vdso32ld = VDSO32L $@ + cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ +quiet_cmd_vdso32as = VDSO32A $@ + cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdso32cc = VDSO32C $@ + cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $< + +# Generate VDSO offsets using helper script +gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +include/generated/vdso32-offsets.h: $(obj)/vdso32.so FORCE + $(call if_changed,vdsosym) diff --git a/arch/parisc/kernel/vdso32/gen_vdso_offsets.sh b/arch/parisc/kernel/vdso32/gen_vdso_offsets.sh new file mode 100755 index 000000000000..da39d6cff7f1 --- /dev/null +++ b/arch/parisc/kernel/vdso32/gen_vdso_offsets.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# +# Match symbols in the DSO that look like VDSO_*; produce a header file +# of constant offsets into the shared object. +# +# Doing this inside the Makefile will break the $(filter-out) function, +# causing Kbuild to rebuild the vdso-offsets header file every time. +# +# Inspired by arm64 version. +# + +LC_ALL=C +sed -n 's/\([0-9a-f]*\) . 
__kernel_\(.*\)/\#define vdso32_offset_\2\t0x\1/p' diff --git a/arch/parisc/kernel/vdso32/note.S b/arch/parisc/kernel/vdso32/note.S new file mode 100644 index 000000000000..bb350918bebd --- /dev/null +++ b/arch/parisc/kernel/vdso32/note.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include <linux/uts.h> +#include <linux/version.h> + +#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \ + .section name, flags; \ + .balign 4; \ + .long 1f - 0f; /* name length */ \ + .long 3f - 2f; /* data length */ \ + .long type; /* note type */ \ +0: .asciz vendor; /* vendor name */ \ +1: .balign 4; \ +2: + +#define ASM_ELF_NOTE_END \ +3: .balign 4; /* pad out section */ \ + .previous + + ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0) + .long LINUX_VERSION_CODE + ASM_ELF_NOTE_END diff --git a/arch/parisc/kernel/vdso32/restart_syscall.S b/arch/parisc/kernel/vdso32/restart_syscall.S new file mode 100644 index 000000000000..7e82008d7e40 --- /dev/null +++ b/arch/parisc/kernel/vdso32/restart_syscall.S @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Syscall restart trampoline for 32 and 64 bits processes. + * + * Copyright (C) 2018-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> + */ + +#include <asm/unistd.h> +#include <asm/vdso.h> + +#include <linux/linkage.h> + + .text + +ENTRY_CFI(__kernel_restart_syscall) + /* + * Setup a trampoline to restart the syscall + * with __NR_restart_syscall + */ + + /* load return pointer */ +#if defined(__VDSO64__) + ldd 0(%sp), %r31 +#elif defined(__VDSO32__) + ldw 0(%sp), %r31 +#endif + + be 0x100(%sr2, %r0) + ldi __NR_restart_syscall, %r20 + +ENDPROC_CFI(__kernel_restart_syscall) diff --git a/arch/parisc/kernel/vdso32/sigtramp.S b/arch/parisc/kernel/vdso32/sigtramp.S new file mode 100644 index 000000000000..192da7077869 --- /dev/null +++ b/arch/parisc/kernel/vdso32/sigtramp.S @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Signal trampolines for 32 bit processes. + * + * Copyright (C) 2006 Randolph Chung <tausq@debian.org> + * Copyright (C) 2018-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> + */ +#include <asm/unistd.h> +#include <linux/linkage.h> +#include <generated/asm-offsets.h> + + .text + +/* Gdb expects the trampoline is on the stack and the pc is offset from + a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline + is not on the stack, we need a new variant with different offsets and + data to tell gdb where to find the signal context on the stack. + + Here we put the offset to the context data at the start of the trampoline + region and offset the first trampoline by 2 instructions. Please do + not change the trampoline as the code in gdb depends on the following + instruction sequence exactly. + */ + .align 64 + .word SIGFRAME_CONTEXT_REGS32 + +/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from + the return address to get an address in the middle of the presumed + call instruction. Since we don't have a call here, we artifically + extend the range covered by the unwind info by adding a nop before + the real start. + */ + nop + + .globl __kernel_sigtramp_rt + .type __kernel_sigtramp_rt, @function +__kernel_sigtramp_rt: + .proc + .callinfo FRAME=ASM_SIGFRAME_SIZE32,CALLS,SAVE_RP + .entry + +.Lsigrt_start = . 
- 4 +0: ldi 0, %r25 /* (in_syscall=0) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop + +1: ldi 1, %r25 /* (in_syscall=1) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop +.Lsigrt_end: + .exit + .procend + .size __kernel_sigtramp_rt,.-__kernel_sigtramp_rt + + + .section .eh_frame,"a",@progbits + +/* This is where the mcontext_t struct can be found on the stack. */ +#define PTREGS SIGFRAME_CONTEXT_REGS32 /* 32-bit process offset is -672 */ + +/* Register REGNO can be found at offset OFS of the mcontext_t structure. */ + .macro rsave regno,ofs + .byte 0x05 /* DW_CFA_offset_extended */ + .uleb128 \regno; /* regno */ + .uleb128 \ofs /* factored offset */ + .endm + +.Lcie: + .long .Lcie_end - .Lcie_start +.Lcie_start: + .long 0 /* CIE ID */ + .byte 1 /* Version number */ + .stringz "zRS" /* NUL-terminated augmentation string */ + .uleb128 4 /* Code alignment factor */ + .sleb128 4 /* Data alignment factor */ + .byte 89 /* Return address register column, iaoq[0] */ + .uleb128 1 /* Augmentation value length */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ + .byte 0x0f /* DW_CFA_def_cfa_expresion */ + .uleb128 9f - 1f /* length */ +1: + .byte 0x8e /* DW_OP_breg30 */ + .sleb128 PTREGS +9: + .balign 4 +.Lcie_end: + + .long .Lfde0_end - .Lfde0_start +.Lfde0_start: + .long .Lfde0_start - .Lcie /* CIE pointer. */ + .long .Lsigrt_start - . /* PC start, length */ + .long .Lsigrt_end - .Lsigrt_start + .uleb128 0 /* Augmentation */ + + /* General registers */ + rsave 1, 2 + rsave 2, 3 + rsave 3, 4 + rsave 4, 5 + rsave 5, 6 + rsave 6, 7 + rsave 7, 8 + rsave 8, 9 + rsave 9, 10 + rsave 10, 11 + rsave 11, 12 + rsave 12, 13 + rsave 13, 14 + rsave 14, 15 + rsave 15, 16 + rsave 16, 17 + rsave 17, 18 + rsave 18, 19 + rsave 19, 20 + rsave 20, 21 + rsave 21, 22 + rsave 22, 23 + rsave 23, 24 + rsave 24, 25 + rsave 25, 26 + rsave 26, 27 + rsave 27, 28 + rsave 28, 29 + rsave 29, 30 + rsave 30, 31 + rsave 31, 32 + + /* Floating-point registers */ + rsave 32, 42 + rsave 33, 43 + rsave 34, 44 + rsave 35, 45 + rsave 36, 46 + rsave 37, 47 + rsave 38, 48 + rsave 39, 49 + rsave 40, 50 + rsave 41, 51 + rsave 42, 52 + rsave 43, 53 + rsave 44, 54 + rsave 45, 55 + rsave 46, 56 + rsave 47, 57 + rsave 48, 58 + rsave 49, 59 + rsave 50, 60 + rsave 51, 61 + rsave 52, 62 + rsave 53, 63 + rsave 54, 64 + rsave 55, 65 + rsave 56, 66 + rsave 57, 67 + rsave 58, 68 + rsave 59, 69 + rsave 60, 70 + rsave 61, 71 + rsave 62, 72 + rsave 63, 73 + rsave 64, 74 + rsave 65, 75 + rsave 66, 76 + rsave 67, 77 + rsave 68, 78 + rsave 69, 79 + rsave 70, 80 + rsave 71, 81 + rsave 72, 82 + rsave 73, 83 + rsave 74, 84 + rsave 75, 85 + rsave 76, 86 + rsave 77, 87 + rsave 78, 88 + rsave 79, 89 + rsave 80, 90 + rsave 81, 91 + rsave 82, 92 + rsave 83, 93 + rsave 84, 94 + rsave 85, 95 + rsave 86, 96 + rsave 87, 97 + + /* SAR register */ + rsave 88, 102 + + /* iaoq[0] return address register */ + rsave 89, 100 + .balign 4 +.Lfde0_end: diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S new file mode 100644 index 000000000000..d4aff3af5262 --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32.lds.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is the infamous ld script for the 32 bits vdso library + */ +#include <asm/vdso.h> +#include <asm/page.h> + +/* Default link addresses for the vDSOs */ +OUTPUT_FORMAT("elf32-hppa-linux") +OUTPUT_ARCH(hppa) +ENTRY(_start) + +SECTIONS +{ + . 
= VDSO_LBASE + SIZEOF_HEADERS; + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN (16); + .text : + { + *(.text .stub .text.* .gnu.linkonce.t.*) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + /* Other stuff is appended to the text segment: */ + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .rodata2 : { *(.data.rel.ro) } + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table) } + .fixup : { *(.fixup) } + + .dynamic : { *(.dynamic) } :text :dynamic + .plt : { *(.plt) } + .got : { *(.got) } + + _end = .; + __end = .; + PROVIDE (end = .); + + + /* Stabs debugging sections are here too + */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + + /DISCARD/ : { *(.note.GNU-stack) } + /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) } + /DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) } +} + + +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + note PT_NOTE FLAGS(4); /* PF_R */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + + +/* + * This controls what symbols we export from the DSO. 
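 * A sketch of how these exports are consumed (not part of the patch; the
 * macro name below is derived from the sed rule in gen_vdso_offsets.sh):
 * the build turns each exported __kernel_* symbol into a vdso32_offset_*
 * constant in include/generated/vdso32-offsets.h, so kernel code can
 * compute a trampoline address from the per-process mapping base without
 * a runtime ELF symbol lookup, e.g.
 *
 *   addr = mm->context.vdso_base + vdso32_offset_sigtramp_rt32;
 *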
+ */ +VERSION +{ + VDSO_VERSION_STRING { + global: + __kernel_sigtramp_rt32; + __kernel_restart_syscall32; + local: *; + }; +} diff --git a/arch/parisc/kernel/vdso32/vdso32_wrapper.S b/arch/parisc/kernel/vdso32/vdso32_wrapper.S new file mode 100644 index 000000000000..5ac06093e8ec --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32_wrapper.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> +#include <asm/page.h> + + __PAGE_ALIGNED_DATA + + .globl vdso32_start, vdso32_end + .balign PAGE_SIZE +vdso32_start: + .incbin "arch/parisc/kernel/vdso32/vdso32.so" + .balign PAGE_SIZE +vdso32_end: + + .previous diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile new file mode 100644 index 000000000000..a30f5ec5eb4b --- /dev/null +++ b/arch/parisc/kernel/vdso64/Makefile @@ -0,0 +1,48 @@ +# List of files in the vdso, has to be asm only for now + +obj-vdso64 = note.o sigtramp.o restart_syscall.o + +# Build rules + +targets := $(obj-vdso64) vdso64.so +obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) + + +ccflags-y := -shared -fno-common -fno-builtin +ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) +asflags-y := -D__VDSO64__ -s + +KBUILD_AFLAGS += -DBUILD_VDSO +KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING + +VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name) + +obj-y += vdso64_wrapper.o +extra-y += vdso64.lds +CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) + +$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE + +# Force dependency (incbin is bad) +# link rule for the .so file, .lds has to be first +$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) + $(call if_changed,vdso64ld) + +# assembly rules for the .S files +$(obj-vdso64): %.o: %.S FORCE + $(call if_changed_dep,vdso64as) + +# actual build commands +quiet_cmd_vdso64ld = VDSO64L $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ +quiet_cmd_vdso64as = VDSO64A $@ + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< + +# Generate VDSO offsets using helper script +gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +include/generated/vdso64-offsets.h: $(obj)/vdso64.so FORCE + $(call if_changed,vdsosym) diff --git a/arch/parisc/kernel/vdso64/gen_vdso_offsets.sh b/arch/parisc/kernel/vdso64/gen_vdso_offsets.sh new file mode 100755 index 000000000000..37f05cb38dad --- /dev/null +++ b/arch/parisc/kernel/vdso64/gen_vdso_offsets.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# +# Match symbols in the DSO that look like VDSO_*; produce a header file +# of constant offsets into the shared object. +# +# Doing this inside the Makefile will break the $(filter-out) function, +# causing Kbuild to rebuild the vdso-offsets header file every time. +# +# Inspired by arm64 version. +# + +LC_ALL=C +sed -n 's/\([0-9a-f]*\) . 
__kernel_\(.*\)/\#define vdso64_offset_\2\t0x\1/p' diff --git a/arch/parisc/kernel/vdso64/note.S b/arch/parisc/kernel/vdso64/note.S new file mode 100644 index 000000000000..bd1fa23597d6 --- /dev/null +++ b/arch/parisc/kernel/vdso64/note.S @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include "../vdso32/note.S" diff --git a/arch/parisc/kernel/vdso64/restart_syscall.S b/arch/parisc/kernel/vdso64/restart_syscall.S new file mode 100644 index 000000000000..83004451f6b1 --- /dev/null +++ b/arch/parisc/kernel/vdso64/restart_syscall.S @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include "../vdso32/restart_syscall.S" diff --git a/arch/parisc/kernel/vdso64/sigtramp.S b/arch/parisc/kernel/vdso64/sigtramp.S new file mode 100644 index 000000000000..66a6d2b241e1 --- /dev/null +++ b/arch/parisc/kernel/vdso64/sigtramp.S @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Signal trampolines for 64 bit processes. + * + * Copyright (C) 2006 Randolph Chung <tausq@debian.org> + * Copyright (C) 2018-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> + */ +#include <asm/unistd.h> +#include <linux/linkage.h> +#include <generated/asm-offsets.h> + + .text + +/* Gdb expects the trampoline is on the stack and the pc is offset from + a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline + is not on the stack, we need a new variant with different offsets and + data to tell gdb where to find the signal context on the stack. + + Here we put the offset to the context data at the start of the trampoline + region and offset the first trampoline by 2 instructions. Please do + not change the trampoline as the code in gdb depends on the following + instruction sequence exactly. + */ + .align 64 + .word SIGFRAME_CONTEXT_REGS + +/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from + the return address to get an address in the middle of the presumed + call instruction. Since we don't have a call here, we artifically + extend the range covered by the unwind info by adding a nop before + the real start. + */ + nop + + .globl __kernel_sigtramp_rt + .type __kernel_sigtramp_rt, @function +__kernel_sigtramp_rt: + .proc + .callinfo FRAME=ASM_SIGFRAME_SIZE,CALLS,SAVE_RP + .entry + +.Lsigrt_start = . - 4 +0: ldi 0, %r25 /* (in_syscall=0) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop + +1: ldi 1, %r25 /* (in_syscall=1) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop +.Lsigrt_end: + .exit + .procend + .size __kernel_sigtramp_rt,.-__kernel_sigtramp_rt + + .section .eh_frame,"a",@progbits + +/* This is where the mcontext_t struct can be found on the stack. */ +#define PTREGS SIGFRAME_CONTEXT_REGS /* 64-bit process offset is -720 */ + +/* Register REGNO can be found at offset OFS of the mcontext_t structure. */ + .macro rsave regno,ofs + .byte 0x05 /* DW_CFA_offset_extended */ + .uleb128 \regno; /* regno */ + .uleb128 \ofs /* factored offset */ + .endm + +.Lcie: + .long .Lcie_end - .Lcie_start +.Lcie_start: + .long 0 /* CIE ID */ + .byte 1 /* Version number */ + .stringz "zRS" /* NUL-terminated augmentation string */ + .uleb128 4 /* Code alignment factor */ + .sleb128 8 /* Data alignment factor */ + .byte 61 /* Return address register column, iaoq[0] */ + .uleb128 1 /* Augmentation value length */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. 
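   Decoding aside (not part of the patch): 0x1b is DW_EH_PE_pcrel (0x10)
   combined with DW_EH_PE_sdata4 (0x0b), i.e. FDE pointers are signed
   32-bit PC-relative values.  Each rsave below emits a
   DW_CFA_offset_extended record whose second operand is a factored
   offset: the register is saved at CFA + ofs * data_alignment_factor.
   With the factor of 8 above, "rsave 1, 2" places gr1 at CFA + 16, where
   the CFA itself is %r30 + SIGFRAME_CONTEXT_REGS (a negative offset back
   into the signal frame, per the DW_OP_breg30 expression below).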
*/ + .byte 0x0f /* DW_CFA_def_cfa_expresion */ + .uleb128 9f - 1f /* length */ +1: + .byte 0x8e /* DW_OP_breg30 */ + .sleb128 PTREGS +9: + .balign 8 +.Lcie_end: + + .long .Lfde0_end - .Lfde0_start +.Lfde0_start: + .long .Lfde0_start - .Lcie /* CIE pointer. */ + .long .Lsigrt_start - . /* PC start, length */ + .long .Lsigrt_end - .Lsigrt_start + .uleb128 0 /* Augmentation */ + + /* General registers */ + rsave 1, 2 + rsave 2, 3 + rsave 3, 4 + rsave 4, 5 + rsave 5, 6 + rsave 6, 7 + rsave 7, 8 + rsave 8, 9 + rsave 9, 10 + rsave 10, 11 + rsave 11, 12 + rsave 12, 13 + rsave 13, 14 + rsave 14, 15 + rsave 15, 16 + rsave 16, 17 + rsave 17, 18 + rsave 18, 19 + rsave 19, 20 + rsave 20, 21 + rsave 21, 22 + rsave 22, 23 + rsave 23, 24 + rsave 24, 25 + rsave 25, 26 + rsave 26, 27 + rsave 27, 28 + rsave 28, 29 + rsave 29, 30 + rsave 30, 31 + rsave 31, 32 + + /* Floating-point registers */ + rsave 32, 36 + rsave 33, 37 + rsave 34, 38 + rsave 35, 39 + rsave 36, 40 + rsave 37, 41 + rsave 38, 42 + rsave 39, 43 + rsave 40, 44 + rsave 41, 45 + rsave 42, 46 + rsave 43, 47 + rsave 44, 48 + rsave 45, 49 + rsave 46, 50 + rsave 47, 51 + rsave 48, 52 + rsave 49, 53 + rsave 50, 54 + rsave 51, 55 + rsave 52, 56 + rsave 53, 57 + rsave 54, 58 + rsave 55, 59 + rsave 56, 60 + rsave 57, 61 + rsave 58, 62 + rsave 59, 63 + + /* SAR register */ + rsave 60, 67 + + /* iaoq[0] return address register */ + rsave 61, 65 + .balign 8 +.Lfde0_end: diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S new file mode 100644 index 000000000000..de1fb4b19286 --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64.lds.S @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is the infamous ld script for the 64 bits vdso library + */ +#include <asm/vdso.h> + +/* Default link addresses for the vDSOs */ +OUTPUT_FORMAT("elf64-hppa-linux") +OUTPUT_ARCH(hppa:hppa2.0w) +ENTRY(_start) + +SECTIONS +{ + . = VDSO_LBASE + SIZEOF_HEADERS; + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . 
= ALIGN (16); + .text : + { + *(.text .stub .text.* .gnu.linkonce.t.*) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + /* Other stuff is appended to the text segment: */ + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table) } + .fixup : { *(.fixup) } + + .dynamic : { *(.dynamic) } :text :dynamic + .plt : { *(.plt) } + .got : { *(.got) } + + _end = .; + __end = .; + PROVIDE (end = .); + + + /* Stabs debugging sections are here too + */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + + /DISCARD/ : { *(.note.GNU-stack) } + /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) } + /DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) } +} + + +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + note PT_NOTE FLAGS(4); /* PF_R */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + VDSO_VERSION_STRING { + global: + __kernel_sigtramp_rt64; + __kernel_restart_syscall64; + local: *; + }; +} diff --git a/arch/parisc/kernel/vdso64/vdso64_wrapper.S b/arch/parisc/kernel/vdso64/vdso64_wrapper.S new file mode 100644 index 000000000000..66f929482d3d --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64_wrapper.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> +#include <asm/page.h> + + __PAGE_ALIGNED_DATA + + .globl vdso64_start, vdso64_end + .balign PAGE_SIZE +vdso64_start: + .incbin "arch/parisc/kernel/vdso64/vdso64.so" + .balign PAGE_SIZE +vdso64_end: + + .previous diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index ea70a0e08321..5fc0c852c84c 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -13,8 +13,8 @@ #include <linux/compiler.h> #include <linux/uaccess.h> -#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3)) -#define get_kernel_space() (0) +#define get_user_space() mfsp(SR_USER) +#define get_kernel_space() SR_KERNEL /* Returns 0 for success, otherwise, returns number of bytes not transferred. 
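   Sketch of the calling convention relied on below (an assumption drawn
   from the three wrappers, not spelled out in the patch): pa_memcpy()
   dereferences its source through %sr1 and its destination through %sr2,
   so each wrapper only has to load the right space IDs first, e.g. for
   raw_copy_from_user():

     mtsp(get_user_space(), SR_TEMP1);    // %sr1 := user space (source)
     mtsp(get_kernel_space(), SR_TEMP2);  // %sr2 := kernel space (dest)
     return pa_memcpy(dst, (void __force *)src, len);
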
*/ extern unsigned long pa_memcpy(void *dst, const void *src, @@ -23,8 +23,8 @@ extern unsigned long pa_memcpy(void *dst, const void *src, unsigned long raw_copy_to_user(void __user *dst, const void *src, unsigned long len) { - mtsp(get_kernel_space(), 1); - mtsp(get_user_space(), 2); + mtsp(get_kernel_space(), SR_TEMP1); + mtsp(get_user_space(), SR_TEMP2); return pa_memcpy((void __force *)dst, src, len); } EXPORT_SYMBOL(raw_copy_to_user); @@ -32,16 +32,16 @@ EXPORT_SYMBOL(raw_copy_to_user); unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long len) { - mtsp(get_user_space(), 1); - mtsp(get_kernel_space(), 2); + mtsp(get_user_space(), SR_TEMP1); + mtsp(get_kernel_space(), SR_TEMP2); return pa_memcpy(dst, (void __force *)src, len); } EXPORT_SYMBOL(raw_copy_from_user); void * memcpy(void * dst,const void *src, size_t count) { - mtsp(get_kernel_space(), 1); - mtsp(get_kernel_space(), 2); + mtsp(get_kernel_space(), SR_TEMP1); + mtsp(get_kernel_space(), SR_TEMP2); pa_memcpy(dst, src, count); return dst; } diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index e9eabf8f14d7..f114e102aaf2 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -425,3 +425,92 @@ out_of_memory: } pagefault_out_of_memory(); } + +/* Handle non-access data TLB miss faults. + * + * For probe instructions, accesses to userspace are considered allowed + * if they lie in a valid VMA and the access type matches. We are not + * allowed to handle MM faults here so there may be situations where an + * actual access would fail even though a probe was successful. + */ +int +handle_nadtlb_fault(struct pt_regs *regs) +{ + unsigned long insn = regs->iir; + int breg, treg, xreg, val = 0; + struct vm_area_struct *vma, *prev_vma; + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long address; + unsigned long acc_type; + + switch (insn & 0x380) { + case 0x280: + /* FDC instruction */ + fallthrough; + case 0x380: + /* PDC and FIC instructions */ + if (printk_ratelimit()) { + pr_warn("BUG: nullifying cache flush/purge instruction\n"); + show_regs(regs); + } + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + regs->gr[0] |= PSW_N; + return 1; + + case 0x180: + /* PROBE instruction */ + treg = insn & 0x1f; + if (regs->isr) { + tsk = current; + mm = tsk->mm; + if (mm) { + /* Search for VMA */ + address = regs->ior; + mmap_read_lock(mm); + vma = find_vma_prev(mm, address, &prev_vma); + mmap_read_unlock(mm); + + /* + * Check if access to the VMA is okay. + * We don't allow for stack expansion. + */ + acc_type = (insn & 0x40) ? VM_WRITE : VM_READ; + if (vma + && address >= vma->vm_start + && (vma->vm_flags & acc_type) == acc_type) + val = 1; + } + } + if (treg) + regs->gr[treg] = val; + regs->gr[0] |= PSW_N; + return 1; + + case 0x300: + /* LPA instruction */ + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + treg = insn & 0x1f; + if (treg) + regs->gr[treg] = 0; + regs->gr[0] |= PSW_N; + return 1; + + default: + break; + } + + return 0; +}
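A condensed, self-contained restatement of the PROBE check above may help when reading that hunk; this is illustrative only (the helper name is made up) and mirrors the code rather than adding behavior:

#include <linux/mm.h>

/* PROBE emulation core from handle_nadtlb_fault(): bit 0x40 of the
 * instruction word selects a write probe vs. a read probe, and the
 * probe succeeds when the address lies inside a VMA granting that
 * access.  Stack expansion is deliberately not attempted here. */
static inline int nadtlb_probe_ok(unsigned long insn,
				  struct vm_area_struct *vma,
				  unsigned long address)
{
	unsigned long acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;

	return vma && address >= vma->vm_start &&
	       (vma->vm_flags & acc_type) == acc_type;
}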