author     Andy Lutomirski <luto@amacapital.net>    2014-09-23 10:50:52 -0700
committer  Ingo Molnar <mingo@kernel.org>           2014-10-28 11:22:09 +0100
commit     d4f829dd9026797bd5db8715a30192f23b22afaa (patch)
tree       e73421df7744004de3c55f814b42d2702ccf50ab /arch/x86/vdso
parent     b93590901a01a6d036b3b7c856bcc5724fdb9911 (diff)
download   linux-d4f829dd9026797bd5db8715a30192f23b22afaa.tar.bz2
x86_64/vdso: Move getcpu code from vsyscall_64.c to vdso/vma.c
This is pure cut-and-paste. At this point, vsyscall_64.c
contains only code needed for vsyscall emulation, but some of
the comments and function names are still confused.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/a244daf7d3cbe71afc08ad09fdfe1866ca1f1978.1411494540.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--   arch/x86/vdso/vma.c   61
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 970463b566cf..a155dca5edb5 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -10,12 +10,14 @@
 #include <linux/init.h>
 #include <linux/random.h>
 #include <linux/elf.h>
+#include <linux/cpu.h>
 #include <asm/vsyscall.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/page.h>
 #include <asm/hpet.h>
+#include <asm/desc.h>
 
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
@@ -238,3 +240,62 @@ static __init int vdso_setup(char *s)
 }
 __setup("vdso=", vdso_setup);
 #endif
+
+#ifdef CONFIG_X86_64
+/*
+ * Assume __initcall executes before all user space. Hopefully kmod
+ * doesn't violate that. We'll find out if it does.
+ */
+static void vsyscall_set_cpu(int cpu)
+{
+        unsigned long d;
+        unsigned long node = 0;
+#ifdef CONFIG_NUMA
+        node = cpu_to_node(cpu);
+#endif
+        if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+                write_rdtscp_aux((node << 12) | cpu);
+
+        /*
+         * Store cpu number in limit so that it can be loaded quickly
+         * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
+         */
+        d = 0x0f40000000000ULL;
+        d |= cpu;
+        d |= (node & 0xf) << 12;
+        d |= (node >> 4) << 48;
+
+        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
+}
+
+static void cpu_vsyscall_init(void *arg)
+{
+        /* preemption should be already off */
+        vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+static int
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+        long cpu = (long)arg;
+
+        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
+
+        return NOTIFY_DONE;
+}
+
+static int __init vsyscall_init(void)
+{
+        cpu_notifier_register_begin();
+
+        on_each_cpu(cpu_vsyscall_init, NULL, 1);
+        /* notifier priority > KVM */
+        __hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
+        cpu_notifier_register_done();
+
+        return 0;
+}
+__initcall(vsyscall_init);
+#endif
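
For context (not part of the patch): both encodings set up above, the TSC_AUX value written by write_rdtscp_aux((node << 12) | cpu) and the segment limit packed into the GDT_ENTRY_PER_CPU descriptor, place the CPU number in the low 12 bits and the node number in the bits above it, so user space can recover both with one mask and one shift. The stand-alone C sketch below only illustrates that decoding convention; decode_cpu_node() is a hypothetical helper written for this example, not code taken from the kernel or the vDSO.

/*
 * Illustrative only: decode the (node << 12) | cpu packing used by
 * vsyscall_set_cpu() above.  decode_cpu_node() is a made-up helper.
 */
#include <stdio.h>

static void decode_cpu_node(unsigned long p, unsigned *cpu, unsigned *node)
{
        *cpu  = p & 0xfff;      /* low 12 bits: CPU number */
        *node = p >> 12;        /* bits above that: NUMA node */
}

int main(void)
{
        /* Example value a CPU would see in TSC_AUX after
         * write_rdtscp_aux((node << 12) | cpu) for node 1, cpu 3. */
        unsigned long p = (1UL << 12) | 3;
        unsigned cpu, node;

        decode_cpu_node(p, &cpu, &node);
        printf("cpu=%u node=%u\n", cpu, node);
        return 0;
}

Built with, e.g., gcc -O2 demo.c, this prints cpu=3 node=1 for the example value.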