Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r-- | arch/x86/kernel/process_32.c | 138
1 file changed, 81 insertions, 57 deletions
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index be3c7a299f02..7adad088e373 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -36,6 +36,7 @@
 #include <linux/personality.h>
 #include <linux/tick.h>
 #include <linux/percpu.h>
+#include <linux/prctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -45,7 +46,6 @@
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/desc.h>
-#include <asm/vm86.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -82,7 +82,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  */
 void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 void disable_hlt(void)
 {
@@ -114,20 +113,13 @@ void default_idle(void)
 
 		local_irq_disable();
 		if (!need_resched()) {
-			ktime_t t0, t1;
-			u64 t0n, t1n;
-
-			t0 = ktime_get();
-			t0n = ktime_to_ns(t0);
 			safe_halt();	/* enables interrupts racelessly */
 			local_irq_disable();
-			t1 = ktime_get();
-			t1n = ktime_to_ns(t1);
-			sched_clock_idle_wakeup_event(t1n - t0n);
 		}
 		local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
+		local_irq_enable();
 		/* loop is done by the caller */
 		cpu_relax();
 	}
@@ -143,6 +135,7 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle(void)
 {
+	local_irq_enable();
 	cpu_relax();
 }
 
@@ -190,9 +183,6 @@ void cpu_idle(void)
 		while (!need_resched()) {
 			void (*idle)(void);
 
-			if (__get_cpu_var(cpu_idle_state))
-				__get_cpu_var(cpu_idle_state) = 0;
-
 			check_pgt_cache();
 			rmb();
 			idle = pm_idle;
@@ -220,40 +210,19 @@ static void do_nothing(void *unused)
 {
 }
 
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
 void cpu_idle_wait(void)
 {
-	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map, tmp = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-	put_cpu();
-
-	cpus_clear(map);
-	for_each_online_cpu(cpu) {
-		per_cpu(cpu_idle_state, cpu) = 1;
-		cpu_set(cpu, map);
-	}
-
-	__get_cpu_var(cpu_idle_state) = 0;
-
-	wmb();
-	do {
-		ssleep(1);
-		for_each_online_cpu(cpu) {
-			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-				cpu_clear(cpu, map);
-		}
-		cpus_and(map, map, cpu_online_map);
-		/*
-		 * We waited 1 sec, if a CPU still did not call idle
-		 * it may be because it is in idle and not waking up
-		 * because it has nothing to do.
-		 * Give all the remaining CPUS a kick.
-		 */
-		smp_call_function_mask(map, do_nothing, NULL, 0);
-	} while (!cpus_empty(map));
-
-	set_cpus_allowed(current, tmp);
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -273,8 +242,11 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__mwait(ax, cx);
-	}
+			__sti_mwait(ax, cx);
+		else
+			local_irq_enable();
+	} else
+		local_irq_enable();
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
@@ -357,7 +329,7 @@ void __show_registers(struct pt_regs *regs, int all)
 			init_utsname()->version);
 
 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
-			0xffff & regs->cs, regs->ip, regs->flags,
+			(u16)regs->cs, regs->ip, regs->flags,
 			smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->ip);
 
@@ -366,8 +338,7 @@ void __show_registers(struct pt_regs *regs, int all)
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
 		regs->si, regs->di, regs->bp, sp);
 	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
-	       regs->ds & 0xffff, regs->es & 0xffff,
-	       regs->fs & 0xffff, gs, ss);
+	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
 
 	if (!all)
 		return;
@@ -538,11 +509,30 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	return err;
 }
 
-#ifdef CONFIG_SECCOMP
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+	__asm__("movl %0, %%gs" :: "r"(0));
+	regs->fs = 0;
+	set_fs(USER_DS);
+	regs->ds = __USER_DS;
+	regs->es = __USER_DS;
+	regs->ss = __USER_DS;
+	regs->cs = __USER_CS;
+	regs->ip = new_ip;
+	regs->sp = new_sp;
+	/*
+	 * Free the old FP and other extended state
+	 */
+	free_thread_xstate(current);
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
 static void hard_disable_TSC(void)
 {
 	write_cr4(read_cr4() | X86_CR4_TSD);
 }
+
 void disable_TSC(void)
 {
 	preempt_disable();
@@ -554,11 +544,47 @@ void disable_TSC(void)
 		hard_disable_TSC();
 	preempt_enable();
 }
+
 static void hard_enable_TSC(void)
 {
 	write_cr4(read_cr4() & ~X86_CR4_TSD);
 }
-#endif /* CONFIG_SECCOMP */
+
+void enable_TSC(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_enable_TSC();
+	preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+	unsigned int val;
+
+	if (test_thread_flag(TIF_NOTSC))
+		val = PR_TSC_SIGSEGV;
+	else
+		val = PR_TSC_ENABLE;
+
+	return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+	if (val == PR_TSC_SIGSEGV)
+		disable_TSC();
+	else if (val == PR_TSC_ENABLE)
+		enable_TSC();
+	else
+		return -EINVAL;
+
+	return 0;
+}
 
 static noinline void
 __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
@@ -575,12 +601,12 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		/* we clear debugctl to make sure DS
 		 * is not in use when we change it */
 		debugctl = 0;
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+		update_debugctlmsr(0);
 		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
 	}
 
 	if (next->debugctlmsr != debugctl)
-		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
+		update_debugctlmsr(next->debugctlmsr);
 
 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
 		set_debugreg(next->debugreg0, 0);
@@ -592,7 +618,6 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		set_debugreg(next->debugreg7, 7);
 	}
 
-#ifdef CONFIG_SECCOMP
 	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
 	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
 		/* prev and next are different */
@@ -601,7 +626,6 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		else
 			hard_enable_TSC();
 	}
-#endif
 
 #ifdef X86_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
@@ -683,7 +707,7 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter > 5)
-		prefetch(&next->i387.fxsave);
+		prefetch(next->xstate);
 
 	/*
 	 * Reload esp0.
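The new comment block above cpu_idle_wait() spells out its contract: the caller publishes the new pm_idle handler first, then calls cpu_idle_wait() so every CPU is kicked out of the old one before it can be freed or patched away. A minimal sketch of a hypothetical caller (the handler name is illustrative, not part of this patch):

	extern void my_mwait_idle(void);	/* illustrative replacement handler */

	pm_idle = my_mwait_idle;	/* publish the new handler first */
	cpu_idle_wait();		/* then IPI all CPUs so none still runs the old one */

Because the rewritten cpu_idle_wait() only needs an smp_mb() plus a broadcast IPI, the per-CPU cpu_idle_state bookkeeping and the one-second polling loop become unnecessary, which is why they are removed above.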
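get_tsc_mode()/set_tsc_mode() are the arch-side back end for the PR_GET_TSC/PR_SET_TSC prctl options introduced by the same patch series; PR_TSC_ENABLE and PR_TSC_SIGSEGV come from the newly included <linux/prctl.h>. A minimal user-space sketch, assuming a kernel and headers that carry that series (not shown in this diff):

	#include <stdio.h>
	#include <sys/prctl.h>	/* pulls in linux/prctl.h: PR_GET_TSC, PR_SET_TSC, PR_TSC_* */

	int main(void)
	{
		int mode;

		/* PR_GET_TSC ends up in get_tsc_mode(): reports TIF_NOTSC as a PR_TSC_* value */
		if (prctl(PR_GET_TSC, &mode) == 0)
			printf("TSC mode: %s\n",
			       mode == PR_TSC_ENABLE ? "PR_TSC_ENABLE" : "PR_TSC_SIGSEGV");

		/* PR_SET_TSC ends up in set_tsc_mode(): make rdtsc fault in this thread */
		if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) != 0)
			perror("PR_SET_TSC");	/* EINVAL on kernels without this series */

		return 0;
	}

This also explains why the TIF_NOTSC handling in __switch_to_xtra() loses its CONFIG_SECCOMP guards above: the per-thread TSC mode is now a general prctl facility rather than a seccomp-only feature.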