author    Olof Johansson <olof@lixom.net>    2012-05-26 13:30:41 -0700
committer Olof Johansson <olof@lixom.net>    2012-05-26 13:30:41 -0700
commit    67e7ebc21ff5a5bbcb10c9a980896f0e253bcd40 (patch)
tree      f12706b80194f48aad3a1bd70ae9215d48875591 /arch
parent    516fb7a22a5347dc6db731369c365ed1f9b64632 (diff)
parent    c533f32e1ac440b8840159f89ece8c9cca2422b5 (diff)
download  linux-67e7ebc21ff5a5bbcb10c9a980896f0e253bcd40.tar.bz2
Merge branch 'vexpress-v3.4' of git://git.linaro.org/people/pawelmoll/linux into late/soc
A few device tree updates and an include file fix for versatile.

* 'vexpress-v3.4' of git://git.linaro.org/people/pawelmoll/linux:
  ARM: vexpress: Remove twice included header files
  ARM: vexpress: Device Tree updates + update to 3.4

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts    13
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca5s.dts         13
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca9.dts           9
-rw-r--r--  arch/arm/mach-prima2/irq.c                       6
-rw-r--r--  arch/arm/mach-tegra/flowctrl.c                   4
-rw-r--r--  arch/arm/mach-vexpress/v2m.c                     2
-rw-r--r--  arch/arm/mm/fault.c                              4
-rw-r--r--  arch/arm/mm/mmu.c                                3
-rw-r--r--  arch/arm/vfp/vfpmodule.c                        24
-rw-r--r--  arch/frv/include/asm/processor.h                 4
-rw-r--r--  arch/mn10300/kernel/smp.c                        9
-rw-r--r--  arch/parisc/include/asm/prefetch.h               7
-rw-r--r--  arch/parisc/kernel/entry.S                       4
-rw-r--r--  arch/parisc/kernel/pacache.S                    38
-rw-r--r--  arch/parisc/kernel/smp.c                         8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h            7
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c           13
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c              1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S               42
-rw-r--r--  arch/tile/Kconfig                                1
-rw-r--r--  arch/tile/include/asm/thread_info.h              9
-rw-r--r--  arch/tile/kernel/compat_signal.c                12
-rw-r--r--  arch/tile/kernel/intvec_32.S                    41
-rw-r--r--  arch/tile/kernel/intvec_64.S                    38
-rw-r--r--  arch/tile/kernel/process.c                       7
-rw-r--r--  arch/x86/Makefile                                3
-rw-r--r--  arch/x86/boot/compressed/Makefile                9
-rw-r--r--  arch/x86/include/asm/kvm_para.h                  3
-rw-r--r--  arch/x86/kernel/acpi/boot.c                      2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c                14
-rw-r--r--  arch/x86/kernel/microcode_intel.c               14
-rw-r--r--  arch/x86/tools/.gitignore                        1
-rw-r--r--  arch/x86/tools/Makefile                          4
-rw-r--r--  arch/x86/tools/relocs.c (renamed from arch/x86/boot/compressed/relocs.c)  242
34 files changed, 424 insertions, 187 deletions
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 941b161ab78c..7e1091d91af8 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -73,7 +73,10 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x2c001000 0x1000>,
- <0x2c002000 0x100>;
+ <0x2c002000 0x1000>,
+ <0x2c004000 0x2000>,
+ <0x2c006000 0x2000>;
+ interrupts = <1 9 0xf04>;
};
memory-controller@7ffd0000 {
@@ -93,6 +96,14 @@
<0 91 4>;
};
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
pmu {
compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
interrupts = <0 68 4>,
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 6905e66d4748..18917a0f8604 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -77,13 +77,18 @@
timer@2c000600 {
compatible = "arm,cortex-a5-twd-timer";
- reg = <0x2c000600 0x38>;
- interrupts = <1 2 0x304>,
- <1 3 0x304>;
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0x304>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a5-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0x304>;
};
gic: interrupt-controller@2c001000 {
- compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
+ compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index da778693be54..3f0c736d31d6 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -105,8 +105,13 @@
timer@1e000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e000600 0x20>;
- interrupts = <1 2 0xf04>,
- <1 3 0xf04>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@1e000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x1e000620 0x20>;
+ interrupts = <1 14 0xf04>;
};
gic: interrupt-controller@1e001000 {
diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c
index 37c2de9b6f26..a7b9415d30f8 100644
--- a/arch/arm/mach-prima2/irq.c
+++ b/arch/arm/mach-prima2/irq.c
@@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
static __init void sirfsoc_irq_init(void)
{
sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32);
- sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32);
+ sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32,
+ SIRFSOC_INTENAL_IRQ_END + 1 - 32);
writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0);
writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1);
@@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void)
if (!sirfsoc_intc_base)
panic("unable to map intc cpu registers\n");
- irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
+ irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0,
+ &irq_domain_simple_ops, NULL);
of_node_put(np);
diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c
index fef66a7486ed..f07488e0bd32 100644
--- a/arch/arm/mach-tegra/flowctrl.c
+++ b/arch/arm/mach-tegra/flowctrl.c
@@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value)
void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
{
- return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+ return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
}
void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
{
- return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+ return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 47cdcca5a7e7..e769ab612627 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -14,7 +14,6 @@
#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
-#include <linux/device.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
@@ -29,7 +28,6 @@
#include <asm/hardware/gic.h>
#include <asm/hardware/timer-sp.h>
#include <asm/hardware/sp810.h>
-#include <asm/hardware/gic.h>
#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f07467533365..5bb48356d217 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -247,7 +247,9 @@ good_area:
return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
check_stack:
- if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+ /* Don't allow expansion below FIRST_USER_ADDRESS */
+ if (vma->vm_flags & VM_GROWSDOWN &&
+ addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
goto good_area;
out:
return fault;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2c7cf2f9c837..aa78de8bfdd3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -489,7 +489,8 @@ static void __init build_mem_type_table(void)
*/
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_pte |= PTE_EXT_AF;
- mem_types[i].prot_sect |= PMD_SECT_AF;
+ if (mem_types[i].prot_sect)
+ mem_types[i].prot_sect |= PMD_SECT_AF;
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index bc683b8219b5..b0197b2c857d 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
@@ -432,7 +433,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
static void vfp_enable(void *unused)
{
- u32 access = get_copro_access();
+ u32 access;
+
+ BUG_ON(preemptible());
+ access = get_copro_access();
/*
* Enable full access to VFP (cp10 and cp11)
@@ -573,12 +577,6 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
* entry.
*/
hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
-
- /*
- * Disable VFP in the hwstate so that we can detect if it gets
- * used.
- */
- hwstate->fpexc &= ~FPEXC_EN;
return 0;
}
@@ -591,12 +589,8 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
unsigned long fpexc;
int err = 0;
- /*
- * If VFP has been used, then disable it to avoid corrupting
- * the new thread state.
- */
- if (hwstate->fpexc & FPEXC_EN)
- vfp_flush_hwstate(thread);
+ /* Disable VFP to avoid corrupting the new thread state. */
+ vfp_flush_hwstate(thread);
/*
* Copy the floating point registers. There can be unused
@@ -657,7 +651,7 @@ static int __init vfp_init(void)
unsigned int cpu_arch = cpu_architecture();
if (cpu_arch >= CPU_ARCH_ARMv6)
- vfp_enable(NULL);
+ on_each_cpu(vfp_enable, NULL, 1);
/*
* First check that there is a VFP that we can use.
@@ -678,8 +672,6 @@ static int __init vfp_init(void)
} else {
hotcpu_notifier(vfp_hotplug, 0);
- smp_call_function(vfp_enable, NULL, 1);
-
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index 81c2e271d620..9b1a92b73f60 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -135,10 +135,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
-/* Allocation and freeing of basic task resources. */
-extern struct task_struct *alloc_task_struct_node(int node);
-extern void free_task_struct(struct task_struct *p);
-
#define cpu_relax() barrier()
/* data cache prefetch */
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 910dddf65e44..9cd69ad6aa02 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
@@ -38,7 +39,6 @@
#include "internal.h"
#ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpu.h>
#include <asm/cacheflush.h>
static unsigned long sleep_mode[NR_CPUS];
@@ -874,10 +874,13 @@ static void __init smp_online(void)
cpu = smp_processor_id();
- local_irq_enable();
+ notify_cpu_starting(cpu);
+ ipi_call_lock();
set_cpu_online(cpu, true);
- smp_wmb();
+ ipi_call_unlock();
+
+ local_irq_enable();
}
/**
diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h
index c5edc60c059f..1ee7c82672c1 100644
--- a/arch/parisc/include/asm/prefetch.h
+++ b/arch/parisc/include/asm/prefetch.h
@@ -21,7 +21,12 @@
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *addr)
{
- __asm__("ldw 0(%0), %%r0" : : "r" (addr));
+ __asm__(
+#ifndef CONFIG_PA20
+ /* Need to avoid prefetch of NULL on PA7300LC */
+ " extrw,u,= %0,31,32,%%r0\n"
+#endif
+ " ldw 0(%0), %%r0" : : "r" (addr));
}
/* LDD is a PA2.0 addition. */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 6f0594439143..535034217021 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -581,7 +581,11 @@
*/
cmpiclr,= 0x01,\tmp,%r0
ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+#ifdef CONFIG_64BIT
depd,z \prot,8,7,\prot
+#else
+ depw,z \prot,8,7,\prot
+#endif
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 93ff3d90edd1..5d7218ad885c 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm)
/* Purge any old translation */
- pitlb (%sr0,%r28)
+ pitlb (%sr4,%r28)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r1
@@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm)
sub %r25, %r1, %r25
-1: fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
- fic,m %r1(%r28)
+ /* fic only has the type 26 form on PA1.1, requiring an
+ * explicit space specification, so use %sr4 */
+1: fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
cmpb,COND(<<) %r28, %r25,1b
- fic,m %r1(%r28)
+ fic,m %r1(%sr4,%r28)
sync
bv %r0(%r2)
- pitlb (%sr0,%r25)
+ pitlb (%sr4,%r25)
.exit
.procend
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 0bb1d63907f8..4dc7b7942b4c 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
+#include <linux/cpu.h>
#include <linux/atomic.h>
#include <asm/current.h>
@@ -295,8 +296,13 @@ smp_cpu_init(int cpunum)
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
- }
+ }
+
+ notify_cpu_starting(cpunum);
+
+ ipi_call_lock();
set_cpu_online(cpunum, true);
+ ipi_call_unlock();
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795ccef294..fd07f43d6622 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
- u64 vsid_next;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
+ u32 vsid_next;
#else
- u64 vsid_first;
- u64 vsid_max;
+ u64 proto_vsid_first;
+ u64 proto_vsid_max;
+ u64 proto_vsid_next;
#endif
int context_id[SID_CONTEXTS];
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6f87f39a1ac2..10fc8ec9d2a8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
backwards_map = !backwards_map;
/* Uh-oh ... out of mappings. Let's flush! */
- if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
- vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+ vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
memset(vcpu_book3s->sid_map, 0,
sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
kvmppc_mmu_pte_flush(vcpu, 0, 0);
kvmppc_mmu_flush_segments(vcpu);
}
- map->host_vsid = vcpu_book3s->vsid_next++;
+ map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
map->guest_vsid = gvsid;
map->valid = true;
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
return -1;
vcpu3s->context_id[0] = err;
- vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
- vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
- vcpu3s->vsid_next = vcpu3s->vsid_first;
+ vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+ << USER_ESID_BITS) - 1;
+ vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+ vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index def880aea63a..cec4daddbf31 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
/* insert R and C bits from PTE */
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
+ hp[0] = 0;
continue;
}
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae249b9f..6e6e9cef34a8 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,7 +197,8 @@ kvmppc_interrupt:
/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
- andi. r0,r12,0x2
+ andi. r0, r12, 0x2
+ cmpwi cr1, r0, 0
beq 1f
mfspr r3,SPRN_HSRR0
mfspr r4,SPRN_HSRR1
@@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
beq ld_last_prev_inst
cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
beq- ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+ beq- ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
b no_ld_last_inst
@@ -316,23 +323,17 @@ no_dcbz32_off:
* Having set up SRR0/1 with the address where we want
* to continue with relocation on (potentially in module
* space), we either just go straight there with rfi[d],
- * or we jump to an interrupt handler with bctr if there
- * is an interrupt to be handled first. In the latter
- * case, the rfi[d] at the end of the interrupt handler
- * will get us back to where we want to continue.
+ * or we jump to an interrupt handler if there is an
+ * interrupt to be handled first. In the latter case,
+ * the rfi[d] at the end of the interrupt handler will
+ * get us back to where we want to continue.
*/
- cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- beq 1f
- cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
- beq 1f
- cmpwi r12, BOOK3S_INTERRUPT_PERFMON
-1: mtctr r12
-
/* Register usage at this point:
*
* R1 = host R1
* R2 = host R2
+ * R10 = raw exit handler id
* R12 = exit handler id
* R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
@@ -342,12 +343,25 @@ no_dcbz32_off:
PPC_LL r6, HSTATE_HOST_MSR(r13)
PPC_LL r8, HSTATE_VMHANDLER(r13)
- /* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ beq cr1, 1f
+ mtspr SPRN_HSRR1, r6
+ mtspr SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1: /* Restore host msr -> SRR1 */
mtsrr1 r6
/* Load highmem handler address */
mtsrr0 r8
/* RFI into the highmem handler, or jump to interrupt handler */
- beqctr
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ beqa BOOK3S_INTERRUPT_EXTERNAL
+ cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
+ beqa BOOK3S_INTERRUPT_DECREMENTER
+ cmpwi r12, BOOK3S_INTERRUPT_PERFMON
+ beqa BOOK3S_INTERRUPT_PERFMON
+
RFI
kvmppc_handler_trampoline_exit_end:
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 96033e2d6845..74239dd77e06 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -11,6 +11,7 @@ config TILE
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
+ select HAVE_SYSCALL_WRAPPERS if TILEGX
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index bc4f562bd459..7594764d8a69 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -100,9 +100,14 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
#else /* __ASSEMBLY__ */
-/* how to get the thread information struct from ASM */
+/*
+ * How to get the thread information struct from assembly.
+ * Note that we use different macros since different architectures
+ * have different semantics in their "mm" instruction and we would
+ * like to guarantee that the macro expands to exactly one instruction.
+ */
#ifdef __tilegx__
-#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63
+#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 77763ccd5a7d..cdef6e5ec022 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -403,19 +403,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Set up registers for signal handler.
* Registers that we don't modify keep the value they had from
* user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
*/
regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = ptr_to_compat_reg(frame);
regs->lr = restorer;
regs->regs[0] = (unsigned long) usig;
-
- if (ka->sa.sa_flags & SA_SIGINFO) {
- /* Need extra arguments, so mark to restore caller-saves. */
- regs->regs[1] = ptr_to_compat_reg(&frame->info);
- regs->regs[2] = ptr_to_compat_reg(&frame->uc);
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- }
+ regs->regs[1] = ptr_to_compat_reg(&frame->info);
+ regs->regs[2] = ptr_to_compat_reg(&frame->uc);
+ regs->flags |= PT_FLAGS_CALLER_SAVES;
/*
* Notify any tracer that was single-stepping it.
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 5d56a1ef5ba5..6943515100f8 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -839,6 +839,18 @@ STD_ENTRY(interrupt_return)
FEEDBACK_REENTER(interrupt_return)
/*
+ * Use r33 to hold whether we have already loaded the callee-saves
+ * into ptregs. We don't want to do it twice in this loop, since
+ * then we'd clobber whatever changes are made by ptrace, etc.
+ * Get base of stack in r32.
+ */
+ {
+ GET_THREAD_INFO(r32)
+ movei r33, 0
+ }
+
+.Lretry_work_pending:
+ /*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
* need_resched or sigpending) between sampling and the iret.
@@ -848,9 +860,6 @@ STD_ENTRY(interrupt_return)
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
- /* Get base of stack in r32; note r30/31 are used as arguments here. */
- GET_THREAD_INFO(r32)
-
/* Check to see if there is any work to do before returning to user. */
{
@@ -866,16 +875,18 @@ STD_ENTRY(interrupt_return)
/*
* Make sure we have all the registers saved for signal
- * handling or single-step. Call out to C code to figure out
- * exactly what we need to do for each flag bit, then if
- * necessary, reload the flags and recheck.
+ * handling, notify-resume, or single-step. Call out to C
+ * code to figure out exactly what we need to do for each flag bit,
+ * then if necessary, reload the flags and recheck.
*/
- push_extra_callee_saves r0
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_work_pending
+ bnz r33, 1f
}
- bnz r0, .Lresume_userspace
+ push_extra_callee_saves r0
+ movei r33, 1
+1: jal do_work_pending
+ bnz r0, .Lretry_work_pending
/*
* In the NMI case we
@@ -1180,10 +1191,12 @@ handle_syscall:
add r20, r20, tp
lw r21, r20
addi r21, r21, 1
- sw r20, r21
+ {
+ sw r20, r21
+ GET_THREAD_INFO(r31)
+ }
/* Trace syscalls, if requested. */
- GET_THREAD_INFO(r31)
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
@@ -1362,7 +1375,10 @@ handle_ill:
3:
/* set PC and continue */
lw r26, r24
- sw r28, r26
+ {
+ sw r28, r26
+ GET_THREAD_INFO(r0)
+ }
/*
* Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
@@ -1370,7 +1386,6 @@ handle_ill:
* need to clear it here and can't really impose on all other arches.
* So what's another write between friends?
*/
- GET_THREAD_INFO(r0)
addi r1, r0, THREAD_INFO_FLAGS_OFFSET
{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 49d9d6621682..30ae76e50c44 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -647,6 +647,20 @@ STD_ENTRY(interrupt_return)
FEEDBACK_REENTER(interrupt_return)
/*
+ * Use r33 to hold whether we have already loaded the callee-saves
+ * into ptregs. We don't want to do it twice in this loop, since
+ * then we'd clobber whatever changes are made by ptrace, etc.
+ */
+ {
+ movei r33, 0
+ move r32, sp
+ }
+
+ /* Get base of stack in r32. */
+ EXTRACT_THREAD_INFO(r32)
+
+.Lretry_work_pending:
+ /*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
* need_resched or sigpending) between sampling and the iret.
@@ -656,9 +670,6 @@ STD_ENTRY(interrupt_return)
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
- /* Get base of stack in r32; note r30/31 are used as arguments here. */
- GET_THREAD_INFO(r32)
-
/* Check to see if there is any work to do before returning to user. */
{
@@ -674,16 +685,18 @@ STD_ENTRY(interrupt_return)
/*
* Make sure we have all the registers saved for signal
- * handling or single-step. Call out to C code to figure out
+ * handling or notify-resume. Call out to C code to figure out
* exactly what we need to do for each flag bit, then if
* necessary, reload the flags and recheck.
*/
- push_extra_callee_saves r0
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_work_pending
+ bnez r33, 1f
}
- bnez r0, .Lresume_userspace
+ push_extra_callee_saves r0
+ movei r33, 1
+1: jal do_work_pending
+ bnez r0, .Lretry_work_pending
/*
* In the NMI case we
@@ -968,11 +981,16 @@ handle_syscall:
shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
add r20, r20, tp
ld4s r21, r20
- addi r21, r21, 1
- st4 r20, r21
+ {
+ addi r21, r21, 1
+ move r31, sp
+ }
+ {
+ st4 r20, r21
+ EXTRACT_THREAD_INFO(r31)
+ }
/* Trace syscalls, if requested. */
- GET_THREAD_INFO(r31)
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
ld r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 2d5ef617bb39..54e6c64b85cc 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -567,6 +567,10 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
*/
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
+ /* If we enter in kernel mode, do nothing and exit the caller loop. */
+ if (!user_mode(regs))
+ return 0;
+
if (thread_info_flags & _TIF_NEED_RESCHED) {
schedule();
return 1;
@@ -589,8 +593,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
return 1;
}
if (thread_info_flags & _TIF_SINGLESTEP) {
- if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
- single_step_once(regs);
+ single_step_once(regs);
return 0;
}
panic("work_pending: bad flags %#x\n", thread_info_flags);
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 41a7237606a3..94e91e401da9 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -134,6 +134,9 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
+archscripts:
+ $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+
###
# Syscall table generation
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fd55a2ff3ad8..e398bb5d63bb 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -40,13 +40,12 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
+targets += vmlinux.bin.all vmlinux.relocs
-targets += vmlinux.bin.all vmlinux.relocs relocs
-hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs
-
+CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
- cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
-$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+ cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
+$(obj)/vmlinux.relocs: vmlinux FORCE
$(call if_changed,relocs)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c3767cfac..183922e13de1 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
unsigned int eax, ebx, ecx, edx;
char signature[13];
+ if (boot_cpu_data.cpuid_level < 0)
+ return 0; /* So we don't blow up on old processors */
+
cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
memcpy(signature + 0, &ebx, 4);
memcpy(signature + 4, &ecx, 4);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a415b1f44365..7c439fe4941b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d086a09c087d..11c9166c3337 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -945,9 +945,10 @@ struct mce_info {
atomic_t inuse;
struct task_struct *t;
__u64 paddr;
+ int restartable;
} mce_info[MCE_INFO_MAX];
-static void mce_save_info(__u64 addr)
+static void mce_save_info(__u64 addr, int c)
{
struct mce_info *mi;
@@ -955,6 +956,7 @@ static void mce_save_info(__u64 addr)
if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
mi->t = current;
mi->paddr = addr;
+ mi->restartable = c;
return;
}
}
@@ -1130,7 +1132,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst == MCE_AR_SEVERITY) {
/* schedule action before return to userland */
- mce_save_info(m.addr);
+ mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
set_thread_flag(TIF_MCE_NOTIFY);
} else if (kill_it) {
force_sig(SIGBUS, current);
@@ -1179,7 +1181,13 @@ void mce_notify_process(void)
pr_err("Uncorrected hardware memory error in user-access at %llx",
mi->paddr);
- if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0) {
+ /*
+ * We must call memory_failure() here even if the current process is
+ * doomed. We still need to mark the page as poisoned and alert any
+ * other users of the page.
+ */
+ if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
+ mi->restartable == 0) {
pr_err("Memory error not recovered");
force_sig(SIGBUS, current);
}
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 3ca42d0e43a2..0327e2b3c408 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
memset(csig, 0, sizeof(*csig));
- if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
- cpu_has(c, X86_FEATURE_IA64)) {
- pr_err("CPU%d not a capable Intel processor\n", cpu_num);
- return -1;
- }
-
csig->sig = cpuid_eax(0x00000001);
if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
struct microcode_ops * __init init_intel_microcode(void)
{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+ cpu_has(c, X86_FEATURE_IA64)) {
+ pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+ return NULL;
+ }
+
return &microcode_intel_ops;
}
diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore
new file mode 100644
index 000000000000..be0ed065249b
--- /dev/null
+++ b/arch/x86/tools/.gitignore
@@ -0,0 +1 @@
+relocs
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index d511aa97533a..733057b435b0 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -36,3 +36,7 @@ HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-y += relocs
+relocs: $(obj)/relocs
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/tools/relocs.c
index fb7117a4ade1..b43cfcd9bf40 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -18,6 +18,8 @@ static void die(char *fmt, ...);
static Elf32_Ehdr ehdr;
static unsigned long reloc_count, reloc_idx;
static unsigned long *relocs;
+static unsigned long reloc16_count, reloc16_idx;
+static unsigned long *relocs16;
struct section {
Elf32_Shdr shdr;
@@ -28,52 +30,86 @@ struct section {
};
static struct section *secs;
+enum symtype {
+ S_ABS,
+ S_REL,
+ S_SEG,
+ S_LIN,
+ S_NSYMTYPES
+};
+
+static const char * const sym_regex_kernel[S_NSYMTYPES] = {
/*
* Following symbols have been audited. There values are constant and do
* not change if bzImage is loaded at a different physical address than
* the address for which it has been compiled. Don't warn user about
* absolute relocations present w.r.t these symbols.
*/
-static const char abs_sym_regex[] =
+ [S_ABS] =
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
- "__crc_)";
-static regex_t abs_sym_regex_c;
-static int is_abs_reloc(const char *sym_name)
-{
- return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0);
-}
+ "__crc_)",
/*
* These symbols are known to be relative, even if the linker marks them
* as absolute (typically defined outside any section in the linker script.)
*/
-static const char rel_sym_regex[] =
- "^_end$";
-static regex_t rel_sym_regex_c;
-static int is_rel_reloc(const char *sym_name)
+ [S_REL] =
+ "^(__init_(begin|end)|"
+ "__x86_cpu_dev_(start|end)|"
+ "(__parainstructions|__alt_instructions)(|_end)|"
+ "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+ "_end)$"
+};
+
+
+static const char * const sym_regex_realmode[S_NSYMTYPES] = {
+/*
+ * These are 16-bit segment symbols when compiling 16-bit code.
+ */
+ [S_SEG] =
+ "^real_mode_seg$",
+
+/*
+ * These are offsets belonging to segments, as opposed to linear addresses,
+ * when compiling 16-bit code.
+ */
+ [S_LIN] =
+ "^pa_",
+};
+
+static const char * const *sym_regex;
+
+static regex_t sym_regex_c[S_NSYMTYPES];
+static int is_reloc(enum symtype type, const char *sym_name)
{
- return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0);
+ return sym_regex[type] &&
+ !regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}
-static void regex_init(void)
+static void regex_init(int use_real_mode)
{
char errbuf[128];
int err;
-
- err = regcomp(&abs_sym_regex_c, abs_sym_regex,
- REG_EXTENDED|REG_NOSUB);
- if (err) {
- regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf);
- die("%s", errbuf);
- }
+ int i;
+
+ if (use_real_mode)
+ sym_regex = sym_regex_realmode;
+ else
+ sym_regex = sym_regex_kernel;
- err = regcomp(&rel_sym_regex_c, rel_sym_regex,
- REG_EXTENDED|REG_NOSUB);
- if (err) {
- regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf);
- die("%s", errbuf);
+ for (i = 0; i < S_NSYMTYPES; i++) {
+ if (!sym_regex[i])
+ continue;
+
+ err = regcomp(&sym_regex_c[i], sym_regex[i],
+ REG_EXTENDED|REG_NOSUB);
+
+ if (err) {
+ regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
+ die("%s", errbuf);
+ }
}
}
@@ -154,6 +190,10 @@ static const char *rel_type(unsigned type)
REL_TYPE(R_386_RELATIVE),
REL_TYPE(R_386_GOTOFF),
REL_TYPE(R_386_GOTPC),
+ REL_TYPE(R_386_8),
+ REL_TYPE(R_386_PC8),
+ REL_TYPE(R_386_16),
+ REL_TYPE(R_386_PC16),
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
@@ -189,7 +229,7 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
name = sym_strtab + sym->st_name;
}
else {
- name = sec_name(secs[sym->st_shndx].shdr.sh_name);
+ name = sec_name(sym->st_shndx);
}
return name;
}
@@ -472,7 +512,7 @@ static void print_absolute_relocs(void)
* Before warning check if this absolute symbol
* relocation is harmless.
*/
- if (is_abs_reloc(name) || is_rel_reloc(name))
+ if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
continue;
if (!printed) {
@@ -496,7 +536,8 @@ static void print_absolute_relocs(void)
printf("\n");
}
-static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
+ int use_real_mode)
{
int i;
/* Walk through the relocations */
@@ -521,30 +562,67 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
Elf32_Rel *rel;
Elf32_Sym *sym;
unsigned r_type;
+ const char *symname;
+ int shn_abs;
+
rel = &sec->reltab[j];
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
r_type = ELF32_R_TYPE(rel->r_info);
- /* Don't visit relocations to absolute symbols */
- if (sym->st_shndx == SHN_ABS &&
- !is_rel_reloc(sym_name(sym_strtab, sym))) {
- continue;
- }
+
+ shn_abs = sym->st_shndx == SHN_ABS;
+
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
+ case R_386_PC16:
+ case R_386_PC8:
/*
* NONE can be ignored and and PC relative
* relocations don't need to be adjusted.
*/
break;
+
+ case R_386_16:
+ symname = sym_name(sym_strtab, sym);
+ if (!use_real_mode)
+ goto bad;
+ if (shn_abs) {
+ if (is_reloc(S_ABS, symname))
+ break;
+ else if (!is_reloc(S_SEG, symname))
+ goto bad;
+ } else {
+ if (is_reloc(S_LIN, symname))
+ goto bad;
+ else
+ break;
+ }
+ visit(rel, sym);
+ break;
+
case R_386_32:
- /* Visit relocations that need to be adjusted */
+ symname = sym_name(sym_strtab, sym);
+ if (shn_abs) {
+ if (is_reloc(S_ABS, symname))
+ break;
+ else if (!is_reloc(S_REL, symname))
+ goto bad;
+ } else {
+ if (use_real_mode &&
+ !is_reloc(S_LIN, symname))
+ break;
+ }
visit(rel, sym);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
+ bad:
+ symname = sym_name(sym_strtab, sym);
+ die("Invalid %s %s relocation: %s\n",
+ shn_abs ? "absolute" : "relative",
+ rel_type(r_type), symname);
}
}
}
@@ -552,13 +630,19 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
- reloc_count += 1;
+ if (ELF32_R_TYPE(rel->r_info) == R_386_16)
+ reloc16_count++;
+ else
+ reloc_count++;
}
static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
/* Remember the address that needs to be adjusted. */
- relocs[reloc_idx++] = rel->r_offset;
+ if (ELF32_R_TYPE(rel->r_info) == R_386_16)
+ relocs16[reloc16_idx++] = rel->r_offset;
+ else
+ relocs[reloc_idx++] = rel->r_offset;
}
static int cmp_relocs(const void *va, const void *vb)
@@ -568,23 +652,41 @@ static int cmp_relocs(const void *va, const void *vb)
return (*a == *b)? 0 : (*a > *b)? 1 : -1;
}
-static void emit_relocs(int as_text)
+static int write32(unsigned int v, FILE *f)
+{
+ unsigned char buf[4];
+
+ put_unaligned_le32(v, buf);
+ return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
+}
+
+static void emit_relocs(int as_text, int use_real_mode)
{
int i;
/* Count how many relocations I have and allocate space for them. */
reloc_count = 0;
- walk_relocs(count_reloc);
+ walk_relocs(count_reloc, use_real_mode);
relocs = malloc(reloc_count * sizeof(relocs[0]));
if (!relocs) {
die("malloc of %d entries for relocs failed\n",
reloc_count);
}
+
+ relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
+ if (!relocs16) {
+ die("malloc of %d entries for relocs16 failed\n",
+ reloc16_count);
+ }
/* Collect up the relocations */
reloc_idx = 0;
- walk_relocs(collect_reloc);
+ walk_relocs(collect_reloc, use_real_mode);
+
+ if (reloc16_count && !use_real_mode)
+ die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
+ qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs);
/* Print the relocations */
if (as_text) {
@@ -593,58 +695,83 @@ static void emit_relocs(int as_text)
*/
printf(".section \".data.reloc\",\"a\"\n");
printf(".balign 4\n");
- for (i = 0; i < reloc_count; i++) {
- printf("\t .long 0x%08lx\n", relocs[i]);
+ if (use_real_mode) {
+ printf("\t.long %lu\n", reloc16_count);
+ for (i = 0; i < reloc16_count; i++)
+ printf("\t.long 0x%08lx\n", relocs16[i]);
+ printf("\t.long %lu\n", reloc_count);
+ for (i = 0; i < reloc_count; i++) {
+ printf("\t.long 0x%08lx\n", relocs[i]);
+ }
+ } else {
+ /* Print a stop */
+ printf("\t.long 0x%08lx\n", (unsigned long)0);
+ for (i = 0; i < reloc_count; i++) {
+ printf("\t.long 0x%08lx\n", relocs[i]);
+ }
}
+
printf("\n");
}
else {
- unsigned char buf[4];
- /* Print a stop */
- fwrite("\0\0\0\0", 4, 1, stdout);
- /* Now print each relocation */
- for (i = 0; i < reloc_count; i++) {
- put_unaligned_le32(relocs[i], buf);
- fwrite(buf, 4, 1, stdout);
+ if (use_real_mode) {
+ write32(reloc16_count, stdout);
+ for (i = 0; i < reloc16_count; i++)
+ write32(relocs16[i], stdout);
+ write32(reloc_count, stdout);
+
+ /* Now print each relocation */
+ for (i = 0; i < reloc_count; i++)
+ write32(relocs[i], stdout);
+ } else {
+ /* Print a stop */
+ write32(0, stdout);
+
+ /* Now print each relocation */
+ for (i = 0; i < reloc_count; i++) {
+ write32(relocs[i], stdout);
+ }
}
}
}
static void usage(void)
{
- die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
+ die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
}
int main(int argc, char **argv)
{
int show_absolute_syms, show_absolute_relocs;
- int as_text;
+ int as_text, use_real_mode;
const char *fname;
FILE *fp;
int i;
- regex_init();
-
show_absolute_syms = 0;
show_absolute_relocs = 0;
as_text = 0;
+ use_real_mode = 0;
fname = NULL;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (*arg == '-') {
- if (strcmp(argv[1], "--abs-syms") == 0) {
+ if (strcmp(arg, "--abs-syms") == 0) {
show_absolute_syms = 1;
continue;
}
-
- if (strcmp(argv[1], "--abs-relocs") == 0) {
+ if (strcmp(arg, "--abs-relocs") == 0) {
show_absolute_relocs = 1;
continue;
}
- else if (strcmp(argv[1], "--text") == 0) {
+ if (strcmp(arg, "--text") == 0) {
as_text = 1;
continue;
}
+ if (strcmp(arg, "--realmode") == 0) {
+ use_real_mode = 1;
+ continue;
+ }
}
else if (!fname) {
fname = arg;
@@ -655,6 +782,7 @@ int main(int argc, char **argv)
if (!fname) {
usage();
}
+ regex_init(use_real_mode);
fp = fopen(fname, "r");
if (!fp) {
die("Cannot open %s: %s\n",
@@ -673,6 +801,6 @@ int main(int argc, char **argv)
print_absolute_relocs();
return 0;
}
- emit_relocs(as_text);
+ emit_relocs(as_text, use_real_mode);
return 0;
}