author     Paul Mundt <lethal@linux-sh.org>   2006-09-27 18:33:49 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2006-09-27 18:33:49 +0900
commit     19f9a34f87c48bbd270d617d1c986d0c23866a1a (patch)
tree       19f32122aec9c16cbbf8e3331e81040a4850cb8d /include/asm-sh
parent     8c12b5dc13bf8516303a8224ab4e9708b33d5b00 (diff)
download   linux-19f9a34f87c48bbd270d617d1c986d0c23866a1a.tar.bz2
sh: Initial vsyscall page support.
This implements initial support for the vsyscall page on SH. At the moment we leave it configurable, since we have to support nommu from the same code base. For now it is hooked up for the signal trampoline return, with more to be added later once uClibc catches up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
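For illustration only (not part of the patch): once the kernel exposes the page, userspace locates it through the ELF auxiliary vector entry defined in auxvec.h below. A minimal sketch of that lookup, using the modern getauxval() helper for brevity; a uClibc of this vintage would instead walk the Elf32_auxv_t array located just past envp.

/*
 * Illustration only: find the vsyscall/vDSO ELF image the kernel maps
 * into every process and advertises via AT_SYSINFO_EHDR.
 */
#include <elf.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);

	if (ehdr == 0) {
		puts("kernel did not expose a vsyscall/vDSO page");
		return 1;
	}

	/* The entry points at a complete ELF image mapped read/exec
	 * into our address space; check the ELF magic to prove it. */
	if (memcmp((void *)ehdr, ELFMAG, SELFMAG) == 0)
		printf("vsyscall ELF image mapped at %#lx\n", ehdr);

	return 0;
}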
Diffstat (limited to 'include/asm-sh')
-rw-r--r--   include/asm-sh/auxvec.h        14
-rw-r--r--   include/asm-sh/elf.h           20
-rw-r--r--   include/asm-sh/mmu.h            7
-rw-r--r--   include/asm-sh/mmu_context.h    8
-rw-r--r--   include/asm-sh/page.h           5
-rw-r--r--   include/asm-sh/processor.h      6
6 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/include/asm-sh/auxvec.h b/include/asm-sh/auxvec.h
index fc21e4db5881..1b6916e63e90 100644
--- a/include/asm-sh/auxvec.h
+++ b/include/asm-sh/auxvec.h
@@ -1,4 +1,18 @@
#ifndef __ASM_SH_AUXVEC_H
#define __ASM_SH_AUXVEC_H
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them.
+ */
+
+#ifdef CONFIG_VSYSCALL
+/*
+ * Only define this in the vsyscall case, the entry point to
+ * the vsyscall page gets placed here. The kernel will attempt
+ * to build a gate VMA we don't care about otherwise..
+ */
+#define AT_SYSINFO_EHDR 33
+#endif
+
#endif /* __ASM_SH_AUXVEC_H */
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index cc8e5e767345..3a07ab40ac4d 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -121,4 +121,24 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
#endif
+#ifdef CONFIG_VSYSCALL
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int executable_stack);
+
+extern unsigned int vdso_enabled;
+extern void __kernel_vsyscall;
+
+#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
+#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
+
+#define ARCH_DLINFO \
+do { \
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+} while (0)
+#endif /* CONFIG_VSYSCALL */
+
#endif /* __ASM_SH_ELF_H */
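The commit message notes that the first consumer of these definitions is the signal trampoline return. A sketch of how that hookup typically looks (the real change lives in arch/sh/kernel/signal.c, not in this header; the symbol name __kernel_sigreturn and the exact register handling are assumptions modelled on how other ports wire this up): when the application did not register its own SA_RESTORER, the kernel points the procedure return register at the sigreturn stub inside the per-process vsyscall mapping, using the VDSO_SYM() helper defined above.

#ifdef CONFIG_VSYSCALL
	/* Return through the trampoline in the vsyscall page when the
	 * process has one mapped and no explicit restorer was given. */
	if (ka->sa.sa_flags & SA_RESTORER)
		regs->pr = (unsigned long)ka->sa.sa_restorer;
	else if (likely(current->mm->context.vdso))
		regs->pr = VDSO_SYM(&__kernel_sigreturn);
#endif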
diff --git a/include/asm-sh/mmu.h b/include/asm-sh/mmu.h
index 6383dc84501e..cf47df79bb94 100644
--- a/include/asm-sh/mmu.h
+++ b/include/asm-sh/mmu.h
@@ -11,7 +11,12 @@ typedef struct {
#else
/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+ mm_context_id_t id;
+ void *vdso;
+} mm_context_t;
#endif /* CONFIG_MMU */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 87678ba8d6b6..c7088efe579a 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -49,7 +49,7 @@ get_mmu_context(struct mm_struct *mm)
unsigned long mc = mmu_context_cache;
/* Check if we have old version of context. */
- if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+ if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
/* It's up to date, do nothing */
return;
@@ -68,7 +68,7 @@ get_mmu_context(struct mm_struct *mm)
if (!mc)
mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
}
- mm->context = mc;
+ mm->context.id = mc;
}
/*
@@ -78,7 +78,7 @@ get_mmu_context(struct mm_struct *mm)
static __inline__ int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
- mm->context = NO_CONTEXT;
+ mm->context.id = NO_CONTEXT;
return 0;
}
@@ -123,7 +123,7 @@ static __inline__ unsigned long get_asid(void)
static __inline__ void activate_context(struct mm_struct *mm)
{
get_mmu_context(mm);
- set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+ set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
}
/* MMU_TTB can be used for optimizing the fault handling.
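The mmu_context.h hunks above are a mechanical mm->context to mm->context.id conversion; the id value still packs an 8-bit hardware ASID in the low bits and a wrap-around generation ("version") in the upper bits, which is what the XOR against mmu_context_cache tests. For reference, the masks this code relies on are defined earlier in the same header and are untouched by the patch (the values shown are the usual SH ones):

#define MMU_CONTEXT_ASID_MASK		0x000000ff	/* hardware ASID */
#define MMU_CONTEXT_VERSION_MASK	0xffffff00	/* generation counter */
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0		/* forces reallocation */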
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index acf6977b4042..3d8dae31a6f6 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -117,5 +117,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
+/* vDSO support */
+#ifdef CONFIG_VSYSCALL
+#define __HAVE_ARCH_GATE_AREA
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */
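Defining __HAVE_ARCH_GATE_AREA tells the core mm and core-dump code to call architecture-provided gate-area helpers instead of the generic ones. Because the SH vsyscall page is installed as an ordinary per-process VMA rather than a fixed kernel gate, those helpers can simply report that no gate area exists. A sketch of the three functions an architecture had to supply in this kernel generation (the real definitions belong in arch/sh/kernel/vsyscall/, not in this header):

/*
 * Sketch of the __HAVE_ARCH_GATE_AREA helpers, with prototypes per the
 * 2.6.18-era include/linux/mm.h. No fixed gate VMA exists on SH, so
 * every query answers "no gate area".
 */
struct vm_area_struct *get_gate_vma(struct task_struct *task)
{
	return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long address)
{
	return 0;
}

int in_gate_area_no_task(unsigned long address)
{
	return 0;
}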
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index b7cba4e91a72..474773853cd1 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -276,5 +276,11 @@ static inline void prefetch(void *x)
#define prefetchw(x) prefetch(x)
#endif
+#ifdef CONFIG_VSYSCALL
+extern int vsyscall_init(void);
+#else
+#define vsyscall_init() do { } while (0)
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_H */
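Finally, vsyscall_init() (compiled away to a no-op without CONFIG_VSYSCALL) is the boot-time half of the feature: it prepares the single physical page that arch_setup_additional_pages() later maps into each new process. A heavily simplified sketch, assuming the trampoline code is linked into the kernel image between hypothetical vsyscall_trampoline_start/_end symbols; the actual implementation lives in arch/sh/kernel/vsyscall/ and may differ.

/*
 * Boot-time sketch only: allocate one page and copy the in-kernel
 * trampoline image into it, so arch_setup_additional_pages() can later
 * map that page into each new mm. The symbol names and the single
 * static page are illustrative assumptions, not the patch's code.
 */
extern char vsyscall_trampoline_start, vsyscall_trampoline_end;

static void *syscall_page;

int __init vsyscall_init(void)
{
	syscall_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!syscall_page)
		return -ENOMEM;

	memcpy(syscall_page, &vsyscall_trampoline_start,
	       &vsyscall_trampoline_end - &vsyscall_trampoline_start);

	return 0;
}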