path: root/arch/i386/kernel
Diffstat (limited to 'arch/i386/kernel')

 arch/i386/kernel/apm.c     |  2
 arch/i386/kernel/head.S    |  8
 arch/i386/kernel/io_apic.c |  1
 arch/i386/kernel/nmi.c     |  8
 arch/i386/kernel/process.c | 73
 arch/i386/kernel/ptrace.c  | 17
 arch/i386/kernel/traps.c   |  3

 7 files changed, 59 insertions, 53 deletions
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 4112afe712b9..47001d50a083 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -222,6 +222,7 @@
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/freezer.h>
#include <linux/smp.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -2311,7 +2312,6 @@ static int __init apm_init(void)
remove_proc_entry("apm", NULL);
return err;
}
- kapmd_task->flags |= PF_NOFREEZE;
wake_up_process(kapmd_task);
if (num_online_cpus() > 1 && !smp ) {
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index f74dfc419b56..82714668d43b 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -168,6 +168,12 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
.section .init.text,"ax",@progbits
#endif
+ /* Do an early initialization of the fixmap area */
+ movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
+ movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
+ addl $0x007, %eax /* 0x007 = PRESENT+RW+USER */
+ movl %eax, 4092(%edx)
+
#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
cld
@@ -507,6 +513,8 @@ ENTRY(_stext)
.section ".bss.page_aligned","w"
ENTRY(swapper_pg_dir)
.fill 1024,4,0
+ENTRY(swapper_pg_pmd)
+ .fill 1024,4,0
ENTRY(empty_zero_page)
.fill 4096,1,0
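
The head.S hunk above does an early initialization of the fixmap area: it points the last slot of swapper_pg_dir at the new swapper_pg_pmd page so the top 4 MB of the virtual address space, where the fixmap lives, is mapped before the normal page-table setup runs. The byte offset 4092 is simply page-directory entry 1023 times 4 bytes per entry; a minimal arithmetic check, assuming the classic two-level i386 layout:

/* Sketch: derive the 4092 offset used above, assuming the classic i386
 * two-level layout (1024 PDEs of 4 bytes each, each covering 4 MB).
 */
#include <stdio.h>

int main(void)
{
        unsigned long fixmap_vaddr = 0xffc00000UL;   /* any address in the top 4 MB */
        unsigned int pde_index = fixmap_vaddr >> 22; /* 1023: the last page-directory entry */

        printf("PDE index %u -> byte offset %u, flags 0x007 (PRESENT|RW|USER)\n",
               pde_index, pde_index * 4);
        return 0;
}

The second hunk reserves the backing swapper_pg_pmd page in .bss.page_aligned, right next to swapper_pg_dir.
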
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7f8b7af2b95f..21db8f56c9a1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -667,6 +667,7 @@ static int balanced_irq(void *unused)
set_pending_irq(i, cpumask_of_cpu(0));
}
+ set_freezable();
for ( ; ; ) {
time_remaining = schedule_timeout_interruptible(time_remaining);
try_to_freeze();
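
The apm.c and io_apic.c hunks are two halves of the same freezer rework: kernel threads are no longer freezable by default, so kapmd's explicit PF_NOFREEZE becomes redundant (and the freezer declarations now come from <linux/freezer.h>), while balanced_irq, which does want to be frozen across suspend, has to opt in with set_freezable() before its try_to_freeze() loop. A minimal sketch of the resulting pattern for a freezable kernel thread; my_kthread() is an illustrative name, not part of the patch:

/* Minimal sketch of a freezable kernel thread under the reworked freezer
 * API; my_kthread() is an illustrative name, not taken from this diff.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

static int my_kthread(void *unused)
{
        set_freezable();                        /* opt in: kthreads are non-freezable by default */

        while (!kthread_should_stop()) {
                try_to_freeze();                /* park here while the system suspends */
                msleep_interruptible(1000);     /* stand-in for the thread's real work */
        }
        return 0;
}

Such a thread is still started with kthread_run(); only the explicit freeze opt-in is new.
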
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index fba121f7973f..03b7f5584d71 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -295,7 +295,7 @@ static unsigned int
last_irq_sums [NR_CPUS],
alert_counter [NR_CPUS];
-void touch_nmi_watchdog (void)
+void touch_nmi_watchdog(void)
{
if (nmi_watchdog > 0) {
unsigned cpu;
@@ -304,8 +304,10 @@ void touch_nmi_watchdog (void)
* Just reset the alert counters, (other CPUs might be
* spinning on locks we hold):
*/
- for_each_present_cpu (cpu)
- alert_counter[cpu] = 0;
+ for_each_present_cpu(cpu) {
+ if (alert_counter[cpu])
+ alert_counter[cpu] = 0;
+ }
}
/*
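
The touch_nmi_watchdog() change is a small scalability tweak: the function can be called very frequently (for instance from long printk loops), and unconditionally storing zero into every CPU's alert counter keeps pulling those cache lines exclusive; checking first leaves the common, already-zero case read-only. A compact illustration of the test-before-store pattern, with illustrative names outside the kernel:

/* Test-before-store: when a shared counter is usually already zero,
 * reading it first avoids dirtying the cache line on every call.
 * NCPUS/counters are illustrative stand-ins for NR_CPUS/alert_counter.
 */
#define NCPUS 8

static unsigned int counters[NCPUS];

static void reset_counters(void)
{
        int i;

        for (i = 0; i < NCPUS; i++) {
                if (counters[i])         /* cheap shared read */
                        counters[i] = 0; /* write only when something must change */
        }
}
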
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 06dfa65ad180..6c49acb96982 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -538,8 +538,31 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
return 1;
}
-static noinline void __switch_to_xtra(struct task_struct *next_p,
- struct tss_struct *tss)
+#ifdef CONFIG_SECCOMP
+void hard_disable_TSC(void)
+{
+ write_cr4(read_cr4() | X86_CR4_TSD);
+}
+void disable_TSC(void)
+{
+ preempt_disable();
+ if (!test_and_set_thread_flag(TIF_NOTSC))
+ /*
+ * Must flip the CPU state synchronously with
+ * TIF_NOTSC in the current running context.
+ */
+ hard_disable_TSC();
+ preempt_enable();
+}
+void hard_enable_TSC(void)
+{
+ write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+#endif /* CONFIG_SECCOMP */
+
+static noinline void
+__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
{
struct thread_struct *next;
@@ -555,6 +578,17 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
set_debugreg(next->debugreg[7], 7);
}
+#ifdef CONFIG_SECCOMP
+ if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+ test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+ /* prev and next are different */
+ if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+ hard_disable_TSC();
+ else
+ hard_enable_TSC();
+ }
+#endif
+
if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
/*
* Disable the bitmap via an invalid offset. We still cache
@@ -586,33 +620,6 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
}
/*
- * This function selects if the context switch from prev to next
- * has to tweak the TSC disable bit in the cr4.
- */
-static inline void disable_tsc(struct task_struct *prev_p,
- struct task_struct *next_p)
-{
- struct thread_info *prev, *next;
-
- /*
- * gcc should eliminate the ->thread_info dereference if
- * has_secure_computing returns 0 at compile time (SECCOMP=n).
- */
- prev = task_thread_info(prev_p);
- next = task_thread_info(next_p);
-
- if (has_secure_computing(prev) || has_secure_computing(next)) {
- /* slow path here */
- if (has_secure_computing(prev) &&
- !has_secure_computing(next)) {
- write_cr4(read_cr4() & ~X86_CR4_TSD);
- } else if (!has_secure_computing(prev) &&
- has_secure_computing(next))
- write_cr4(read_cr4() | X86_CR4_TSD);
- }
-}
-
-/*
* switch_to(x,yn) should switch tasks from x to y.
*
* We fsave/fwait so that an exception goes off at the right time
@@ -689,11 +696,9 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
/*
* Now maybe handle debug registers and/or IO bitmaps
*/
- if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
- || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
- __switch_to_xtra(next_p, tss);
-
- disable_tsc(prev_p, next_p);
+ if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
/*
* Leave lazy mode, flushing any hypercalls made here.
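
The process.c rework replaces the seccomp-only disable_tsc() helper removed above, which keyed CR4.TSD directly off has_secure_computing(), with a per-task TIF_NOTSC flag: disable_TSC() sets the flag and flips CR4.TSD synchronously for the current task, and __switch_to_xtra(), which now also receives prev_p, toggles the bit on a context switch only when the flag actually differs between the outgoing and incoming tasks. Assuming a kernel built with CONFIG_SECCOMP, where entering seccomp strict mode marks the task TIF_NOTSC just as the old code disabled the TSC for seccomp tasks, RDTSC from such a task should fault; a hedged user-space check:

/* Hedged user-space check (i386/x86 only): after entering seccomp strict
 * mode, RDTSC should be killed with SIGSEGV because the kernel sets
 * CR4.TSD for TIF_NOTSC tasks.  Assumes CONFIG_SECCOMP and the behaviour
 * described above; strict mode also restricts the task to read/write/exit.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SECCOMP
#define PR_SET_SECCOMP 22
#endif

int main(void)
{
        unsigned int lo, hi;

        if (prctl(PR_SET_SECCOMP, 1, 0, 0, 0) != 0) {   /* 1 == strict mode */
                perror("prctl(PR_SET_SECCOMP)");
                return 1;
        }
        __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi)); /* expected: SIGSEGV here */
        printf("tsc = %u:%u\n", hi, lo);                /* should not be reached */
        return 0;
}
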
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c0ceec5de00..1c075f58d1f9 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -358,17 +358,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp, datap);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -395,10 +387,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
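
The ptrace.c hunks replace the open-coded PTRACE_PEEKTEXT/PEEKDATA and POKETEXT/POKEDATA paths with the new generic_ptrace_peekdata()/generic_ptrace_pokedata() helpers. Going only by the arch code removed above, the peek side amounts to an access_process_vm() read followed by put_user() of the result; a sketch of that shape, not the literal kernel/ptrace.c body:

/* Sketch of what the peek helper boils down to, reconstructed from the
 * arch-specific code removed above; not the literal generic implementation.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static int peekdata_sketch(struct task_struct *child, long addr,
                           unsigned long __user *datap)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, datap);
}
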
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 90da0575fcff..18c1c285836d 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -390,7 +390,7 @@ void die(const char * str, struct pt_regs * regs, long err)
unsigned long esp;
unsigned short ss;
- report_bug(regs->eip);
+ report_bug(regs->eip, regs);
printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
@@ -433,6 +433,7 @@ void die(const char * str, struct pt_regs * regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
+ add_taint(TAINT_DIE);
spin_unlock_irqrestore(&die.lock, flags);
if (!regs)
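
The traps.c hunks hand the trapping pt_regs through to report_bug(), so the BUG/WARN reporting path can look at the faulting context, and tag every die() with the new TAINT_DIE taint so later oops output shows the kernel had already died once. From user space the cumulative taint mask is visible in /proc/sys/kernel/tainted; a small hedged check, where the exact bit for TAINT_DIE (1 << 7, the 'D' flag) is my assumption and not stated in this diff:

/* Hedged helper: report whether the running kernel carries the "die" taint.
 * Assumption: TAINT_DIE is bit 7 of /proc/sys/kernel/tainted ('D' flag);
 * that value is not part of this diff.
 */
#include <stdio.h>

int main(void)
{
        unsigned long mask;
        FILE *f = fopen("/proc/sys/kernel/tainted", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%lu", &mask) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        printf("taint mask %#lx: kernel %s died/oopsed before\n",
               mask, (mask & (1UL << 7)) ? "has" : "has not");
        return 0;
}
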