Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/Kbuild          |  1
-rw-r--r--  arch/x86/include/asm/byteorder.h     |  1
-rw-r--r--  arch/x86/kernel/hpet.c               |  3
-rw-r--r--  arch/x86/kernel/kprobes.c            |  2
-rw-r--r--  arch/x86/kernel/syscall_table_32.S   |  2
-rw-r--r--  arch/x86/mm/pageattr.c               | 10
-rw-r--r--  arch/x86/mm/pat.c                    | 45
7 files changed, 21 insertions(+), 43 deletions(-)
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a9f8a814a1f7..4a8e80cdcfa5 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -22,4 +22,3 @@ unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
unifdef-y += vm86.h
unifdef-y += vsyscall.h
-unifdef-y += swab.h
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h
index 7c49917e3d9d..b13a7a88f3eb 100644
--- a/arch/x86/include/asm/byteorder.h
+++ b/arch/x86/include/asm/byteorder.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H
-#include <asm/swab.h>
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_X86_BYTEORDER_H */
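The two hunks above drop both the unifdef export of swab.h and the direct <asm/swab.h> include from <asm/byteorder.h>; the conversion helpers that users of this header rely on come in through the <linux/byteorder/little_endian.h> include that remains. Purely as an illustration of what a 32-bit byte swap computes (a userspace stand-in, not the kernel's implementation; swab32_example is a made-up name):

#include <stdint.h>
#include <stdio.h>

/* Byte-reverse a 32-bit value, the operation the kernel's __swab32() performs. */
static uint32_t swab32_example(uint32_t x)
{
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
}

int main(void)
{
        printf("0x%08x -> 0x%08x\n", 0x12345678U, swab32_example(0x12345678U));
        return 0;
}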
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cd759ad90690..64d5ad0b8add 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -628,11 +628,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) {
case CPU_ONLINE:
- INIT_DELAYED_WORK(&work.work, hpet_work);
+ INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
init_completion(&work.complete);
/* FIXME: add schedule_work_on() */
schedule_delayed_work_on(cpu, &work.work, 0);
wait_for_completion(&work.complete);
+ destroy_timer_on_stack(&work.work.timer);
break;
case CPU_DEAD:
if (hdev) {
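The hpet.c hunk initializes the on-stack work item with the _ON_STACK variant and tears down its timer with destroy_timer_on_stack() before the function returns, which keeps debug-objects tracking consistent for an object that lives in a stack frame. A minimal sketch of the same pattern, with made-up names (example_work, example_work_fn, example_run_on_cpu) and the usual workqueue/completion API:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/timer.h>

struct example_work {
        struct delayed_work work;
        struct completion complete;
};

static void example_work_fn(struct work_struct *w)
{
        struct example_work *ew = container_of(w, struct example_work, work.work);

        /* ... per-CPU work would run here ... */
        complete(&ew->complete);
}

static void example_run_on_cpu(int cpu)
{
        struct example_work ew;

        /* _ON_STACK marks the embedded timer as a stack object for debugobjects */
        INIT_DELAYED_WORK_ON_STACK(&ew.work, example_work_fn);
        init_completion(&ew.complete);

        schedule_delayed_work_on(cpu, &ew.work, 0);
        wait_for_completion(&ew.complete);

        /* drop the debugobjects tracking before the stack frame disappears */
        destroy_timer_on_stack(&ew.work.timer);
}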
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 884d985b8b82..e948b28a5a9a 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -446,7 +446,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
if (p->ainsn.boostable == 1 && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d44395ff34c3..e2e86a08f31d 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -88,7 +88,7 @@ ENTRY(sys_call_table)
.long sys_uselib
.long sys_swapon
.long sys_reboot
- .long old_readdir
+ .long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4cf30dee8161..e89d24815f26 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -555,12 +555,10 @@ repeat:
if (!pte_val(old_pte)) {
if (!primary)
return 0;
-
- /*
- * Special error value returned, indicating that the mapping
- * did not exist at this address.
- */
- return -EFAULT;
+ WARN(1, KERN_WARNING "CPA: called for zero pte. "
+ "vaddr = %lx cpa->vaddr = %lx\n", address,
+ *cpa->vaddr);
+ return -EINVAL;
}
if (level == PG_LEVEL_4K) {
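The pageattr.c hunk makes the zero-pte case complain loudly via WARN() and return -EINVAL instead of quietly returning -EFAULT. The hunk calls WARN(1, ...) inside the branch; the equivalent idiom below folds the condition into WARN() itself, which prints the message plus a stack trace when the condition is true and evaluates to that condition. Everything here (example_lookup_pte, example_change_attr) is made up for illustration:

#include <linux/kernel.h>
#include <linux/errno.h>

/* Stand-in lookup that pretends the address has no pte. */
static unsigned long example_lookup_pte(unsigned long addr)
{
        return 0;
}

static int example_change_attr(unsigned long addr)
{
        unsigned long pte = example_lookup_pte(addr);

        /* WARN(cond, ...) logs and dumps a stack trace when cond is non-zero */
        if (WARN(!pte, "example: called for zero pte, vaddr = %lx\n", addr))
                return -EINVAL;

        /* ... change the attributes of the existing mapping ... */
        return 0;
}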
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 160c42d3eb8f..8b08fb955274 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -505,35 +505,6 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
}
#endif /* CONFIG_STRICT_DEVMEM */
-/*
- * Change the memory type for the physical address range in kernel identity
- * mapping space if that range is a part of identity map.
- */
-static int kernel_map_sync_memtype(u64 base, unsigned long size,
- unsigned long flags)
-{
- unsigned long id_sz;
- int ret;
-
- if (!pat_enabled || base >= __pa(high_memory))
- return 0;
-
- id_sz = (__pa(high_memory) < base + size) ?
- __pa(high_memory) - base :
- size;
-
- ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
- /*
- * -EFAULT return means that the addr was not valid and did not have
- * any identity mapping. That case is a success for
- * kernel_map_sync_memtype.
- */
- if (ret == -EFAULT)
- ret = 0;
-
- return ret;
-}
-
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
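The removed kernel_map_sync_memtype() kept the cache attributes of the kernel identity mapping in sync for whatever part of [base, base + size) lies below __pa(high_memory), clamping the length so it never runs past the end of the direct mapping and treating -EFAULT ("no identity mapping there") as nothing to do. The hunks that follow open-code the same logic at the call sites. A kernel-context sketch of that clamp, mirroring the removed helper (example_sync_identity_map is a made-up name; the include list is a guess at where these symbols are declared):

#include <linux/mm.h>           /* high_memory */
#include <asm/page.h>           /* __pa(), __va() */
#include <asm/pat.h>            /* pat_enabled */
#include <asm/io.h>             /* ioremap_change_attr() on x86 */

static int example_sync_identity_map(u64 base, unsigned long size,
                                     unsigned long flags)
{
        unsigned long id_sz;
        int ret;

        if (!pat_enabled || base >= __pa(high_memory))
                return 0;       /* PAT off, or nothing of this range is identity-mapped */

        /* only touch the part that the direct mapping actually covers */
        id_sz = (__pa(high_memory) < base + size) ?
                        __pa(high_memory) - base : size;

        ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
        if (ret == -EFAULT)     /* no identity mapping for this address: fine */
                ret = 0;

        return ret;
}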
@@ -584,7 +555,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (retval < 0)
return 0;
- if (kernel_map_sync_memtype(offset, size, flags)) {
+ if (((pfn < max_low_pfn_mapped) ||
+ (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
+ ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
free_memtype(offset, offset + size);
printk(KERN_INFO
"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -632,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
int strict_prot)
{
int is_ram = 0;
- int ret;
+ int id_sz, ret;
unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
@@ -673,7 +646,15 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
flags);
}
- if (kernel_map_sync_memtype(paddr, size, flags)) {
+ /* Need to keep identity mapping in sync */
+ if (paddr >= __pa(high_memory))
+ return 0;
+
+ id_sz = (__pa(high_memory) < paddr + size) ?
+ __pa(high_memory) - paddr :
+ size;
+
+ if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
free_memtype(paddr, paddr + size);
printk(KERN_ERR
"%s:%d reserve_pfn_range ioremap_change_attr failed %s "