Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |   1
-rw-r--r--  arch/arm/kernel/asm-offsets.c     |  40
-rw-r--r--  arch/arm/kernel/calls.S           |   1
-rw-r--r--  arch/arm/kernel/entry-armv.S      |   8
-rw-r--r--  arch/arm/kernel/hibernate.c       |   4
-rw-r--r--  arch/arm/kernel/hyp-stub.S        |  24
-rw-r--r--  arch/arm/kernel/irq.c             |   2
-rw-r--r--  arch/arm/kernel/machine_kexec.c   |  16
-rw-r--r--  arch/arm/kernel/module.c          |   2
-rw-r--r--  arch/arm/kernel/reboot.c          |   2
-rw-r--r--  arch/arm/kernel/setup.c           |   6
-rw-r--r--  arch/arm/kernel/smp.c             |   2
-rw-r--r--  arch/arm/kernel/topology.c        |   4
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S | 316
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S     |  66
15 files changed, 398 insertions, 96 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 2c5f160be65e..ad325a8c7e1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL)	+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
+AFLAGS_hyp-stub.o	:=-Wa,-march=armv7-a
 ifeq ($(CONFIG_ARM_PSCI),y)
 obj-$(CONFIG_SMP)	+= psci_smp.o
 endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 871b8267d211..27d05813ff09 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -170,41 +170,11 @@ int main(void)
   DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
-  DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
-  DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
-  DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
-  DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
-  DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context));
-  DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
-  DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
-  DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
-  DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
-  DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
-  DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
-  DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
-  DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
-  DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
-  DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
-  DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
-  DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
-  DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
-  DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
-  DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-  DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-  DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-  DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-  DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-  DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-  DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-  DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-  DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
-  DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-  DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-  DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
-  DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
-  DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
-  DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+  DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
+  DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+  DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
+  DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+  DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
 #endif
   BLANK();
 #ifdef CONFIG_VDSO
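A note on the asm-offsets.c hunk above: each DEFINE() evaluates offsetof() at compile time, and kbuild scrapes the resulting constants out of the generated assembly into a header, so hand-written assembly (here, the KVM world-switch code) can address C struct fields symbolically. The following is a minimal stand-alone sketch of the same idea that simply prints the offsets; both structs are hypothetical stand-ins, not the real KVM layouts.

/* sketch.c -- what asm-offsets.c achieves, in miniature.  The kernel
 * compiles its version only to assembly and scrapes the constants into
 * generated headers; here we just print them. */
#include <stdio.h>
#include <stddef.h>

struct kvm_regs_sketch {
	unsigned long usr_regs[17];		/* r0-r15 + cpsr, roughly */
};

struct kvm_cpu_context_sketch {
	struct kvm_regs_sketch gp_regs;
	unsigned long long vfp[34];		/* placeholder VFP state */
};

int main(void)
{
	/* analogous to DEFINE(CPU_CTXT_GP_REGS, offsetof(...)) etc. */
	printf("CPU_CTXT_GP_REGS %zu\n",
	       offsetof(struct kvm_cpu_context_sketch, gp_regs));
	printf("CPU_CTXT_VFP     %zu\n",
	       offsetof(struct kvm_cpu_context_sketch, vfp));
	printf("GP_REGS_USR      %zu\n",
	       offsetof(struct kvm_regs_sketch, usr_regs));
	return 0;
}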
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index ac368bb068d1..dfc7cd6851ad 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -400,6 +400,7 @@
 		CALL(sys_userfaultfd)
 		CALL(sys_membarrier)
 		CALL(sys_mlock2)
+		CALL(sys_copy_file_range)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3ce377f7251f..e2550500486d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -1064,7 +1064,6 @@ ENDPROC(vector_\name)
 	.endm
 
 	.section .stubs, "ax", %progbits
-__stubs_start:
 	@ This must be the first word
 	.word vector_swi
 
@@ -1202,14 +1201,13 @@ vector_addrexcptn:
 	.long	__fiq_svc			@  e
 	.long	__fiq_svc			@  f
 
-	.globl	vector_fiq_offset
-	.equ	vector_fiq_offset, vector_fiq
+	.globl	vector_fiq
 
 	.section .vectors, "ax", %progbits
-__vectors_start:
+.L__vectors_start:
 	W(b)	vector_rst
 	W(b)	vector_und
-	W(ldr)	pc, __vectors_start + 0x1000
+	W(ldr)	pc, .L__vectors_start + 0x1000
 	W(b)	vector_pabt
 	W(b)	vector_dabt
 	W(b)	vector_addrexcptn
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index a71501ff6f18..b09561a6d06a 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -62,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
 
 	ret = swsusp_save();
 	if (ret == 0)
-		_soft_restart(virt_to_phys(cpu_resume), false);
+		_soft_restart(virt_to_idmap(cpu_resume), false);
 
 	return ret;
 }
@@ -87,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
 	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 		copy_page(pbe->orig_address, pbe->address);
 
-	_soft_restart(virt_to_phys(cpu_resume), false);
+	_soft_restart(virt_to_idmap(cpu_resume), false);
 }
 
 static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 2a55373f49bf..0b1e4a93d67e 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -17,6 +17,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/virt.h>
@@ -161,6 +162,29 @@ ARM_BE8(orr	r7, r7, #(1 << 25))	@ HSCTLR.EE
 1:
 #endif
 
+#ifdef CONFIG_ARM_GIC_V3
+	@ Check whether GICv3 system registers are available
+	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
+	ubfx	r7, r7, #28, #4
+	cmp	r7, #1
+	bne	2f
+
+	@ Enable system register accesses
+	mrc	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
+	orr	r7, r7, #(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE)
+	mcr	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
+	isb
+
+	@ SRE bit could be forced to 0 by firmware.
+	@ Check whether it sticks before accessing any other sysreg
+	mrc	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
+	tst	r7, #ICC_SRE_EL2_SRE
+	beq	2f
+	mov	r7, #0
+	mcr	p15, 4, r7, c12, c11, 0	@ ICH_HCR
+2:
+#endif
+
 	bx	lr			@ The boot CPU mode is left in r4.
ENDPROC(__hyp_stub_install_secondary)
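For readers following the new hyp-stub block: the assembly probes ID_PFR1 bits [31:28] for GICv3 system-register support, sets the enable bits in ICC_HSRE, re-reads the register to make sure firmware did not force SRE back to zero, and only then clears ICH_HCR. Below is a C sketch of that control flow; the coprocessor accesses are simulated by plain variables, and the initial register values are invented for the demonstration.

/* gicv3_sre_sketch.c -- the hyp-stub GICv3 check expressed in C. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ICC_SRE_EL2_SRE		(1u << 0)	/* bit values per the GICv3 spec */
#define ICC_SRE_EL2_ENABLE	(1u << 3)

static uint32_t id_pfr1 = 1u << 28;	/* ID_PFR1.GIC = 1: sysregs present */
static uint32_t icc_hsre;		/* simulated ICC_HSRE (ICC_SRE_EL2) */
static uint32_t ich_hcr = 0xff;		/* simulated ICH_HCR, dirty at boot */

static bool enable_gicv3_sysregs(void)
{
	if (((id_pfr1 >> 28) & 0xf) != 1)	/* like ubfx r7, r7, #28, #4 */
		return false;

	icc_hsre |= ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE;

	/* firmware may force SRE to 0; check that the bit stuck
	 * before touching any other GICv3 system register */
	if (!(icc_hsre & ICC_SRE_EL2_SRE))
		return false;

	ich_hcr = 0;				/* quiesce the hyp interface */
	return true;
}

int main(void)
{
	printf("GICv3 sysregs usable: %s\n",
	       enable_gicv3_sysregs() ? "yes" : "no");
	return 0;
}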
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 1d45320ee125..ece04a457486 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -95,7 +95,7 @@ void __init init_IRQ(void)
 			outer_cache.write_sec = machine_desc->l2c_write_sec;
 		ret = l2x0_of_init(machine_desc->l2c_aux_val,
 				   machine_desc->l2c_aux_mask);
-		if (ret)
+		if (ret && ret != -ENODEV)
 			pr_err("L2C: failed to init: %d\n", ret);
 	}
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8bf3b7c09888..59fd0e24c56b 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -143,10 +143,8 @@ void (*kexec_reinit)(void);
 
 void machine_kexec(struct kimage *image)
 {
-	unsigned long page_list;
-	unsigned long reboot_code_buffer_phys;
-	unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
-	unsigned long reboot_entry_phys;
+	unsigned long page_list, reboot_entry_phys;
+	void (*reboot_entry)(void);
 	void *reboot_code_buffer;
 
 	/*
@@ -159,9 +157,6 @@ void machine_kexec(struct kimage *image)
 
 	page_list = image->head & PAGE_MASK;
 
-	/* we need both effective and real address here */
-	reboot_code_buffer_phys =
-	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
 	reboot_code_buffer = page_address(image->control_code_page);
 
 	/* Prepare parameters for reboot_code_buffer*/
@@ -174,10 +169,11 @@ void machine_kexec(struct kimage *image)
 
 	/* copy our kernel relocation code to the control code page */
 	reboot_entry = fncpy(reboot_code_buffer,
-			     reboot_entry,
+			     &relocate_new_kernel,
 			     relocate_new_kernel_size);
-	reboot_entry_phys = (unsigned long)reboot_entry +
-		(reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
+
+	/* get the identity mapping physical address for the reboot code */
+	reboot_entry_phys = virt_to_idmap(reboot_entry);
 
 	pr_info("Bye!\n");
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index efdddcb97dd1..4f14b5ce6535 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -34,7 +34,7 @@
  * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
  */
 #undef MODULES_VADDR
-#define MODULES_VADDR	(((unsigned long)_etext + ~PMD_MASK) & PMD_MASK)
+#define MODULES_VADDR	(((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
 #endif
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 38269358fd25..71a2ff9ec490 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
 	flush_cache_all();
 
 	/* Switch to the identity mapping. */
-	phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
+	phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
 	phys_reset((unsigned long)addr);
 
 	/* Should never get here. */
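Three of the hunks above (hibernate.c, machine_kexec.c, reboot.c) make the same substitution: the address jumped to while the MMU is being turned off must come from virt_to_idmap() rather than virt_to_phys(), because on platforms whose identity mapping lives at a different alias of RAM than the linear map (TI Keystone 2 is the usual example) the two translations differ. A sketch of the distinction, with made-up constants standing in for the real per-platform values:

/* idmap_sketch.c -- why the reboot paths use virt_to_idmap().
 * On most ARM boards the idmap delta is zero and the two translations
 * agree; the values below are invented for illustration. */
#include <stdio.h>

#define PAGE_OFFSET	0xc0000000ul	/* typical ARM linear-map base */
#define PHYS_OFFSET	0x80000000ul	/* hypothetical RAM base */
#define IDMAP_DELTA	0x10000000ul	/* hypothetical idmap alias offset */

static unsigned long virt_to_phys_sketch(unsigned long va)
{
	return va - PAGE_OFFSET + PHYS_OFFSET;
}

static unsigned long virt_to_idmap_sketch(unsigned long va)
{
	/* same translation, plus the platform's idmap offset */
	return virt_to_phys_sketch(va) + IDMAP_DELTA;
}

int main(void)
{
	unsigned long cpu_resume = 0xc0101234ul;	/* made-up kernel VA */

	printf("virt_to_phys : %#lx\n", virt_to_phys_sketch(cpu_resume));
	printf("virt_to_idmap: %#lx\n", virt_to_idmap_sketch(cpu_resume));
	return 0;
}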
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7d0cba6f1cc5..139791ed473d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -176,13 +176,13 @@ static struct resource mem_res[] = {
 		.name = "Kernel code",
 		.start = 0,
 		.end = 0,
-		.flags = IORESOURCE_MEM
+		.flags = IORESOURCE_SYSTEM_RAM
 	},
 	{
 		.name = "Kernel data",
 		.start = 0,
 		.end = 0,
-		.flags = IORESOURCE_MEM
+		.flags = IORESOURCE_SYSTEM_RAM
 	}
 };
 
@@ -851,7 +851,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		res->name  = "System RAM";
 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
 		request_resource(&iomem_resource, res);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 37312f6749f3..baee70267f29 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -409,7 +409,7 @@ asmlinkage void secondary_start_kernel(void)
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 08b7847bf912..ec279d161b32 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -40,7 +40,7 @@
  * to run the rebalance_domains for all idle cores and the cpu_capacity can be
  * updated during this sequence.
  */
-static DEFINE_PER_CPU(unsigned long, cpu_scale);
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
@@ -306,8 +306,6 @@ void __init init_cpu_topology(void)
 		cpu_topo->socket_id = -1;
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_clear(&cpu_topo->thread_sibling);
-
-		set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
 	}
 	smp_wmb();
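The topology.c change replaces a boot-time set_capacity_scale() loop with a static initializer on the per-cpu variable, so every CPU starts at SCHED_CAPACITY_SCALE (1024 in the kernel) until DT-provided efficiencies rescale it. A plain-C analogue of the before/after, using an ordinary array as a stand-in for DEFINE_PER_CPU storage (NR_CPUS_SKETCH is illustrative):

/* capacity_sketch.c -- static initialization replacing a boot-time loop. */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1ul << SCHED_CAPACITY_SHIFT)
#define NR_CPUS_SKETCH		4

/* before: zero-initialized, then set_capacity_scale() called per CPU;
 * after:  full capacity from the start (GCC range initializer, an
 * extension the kernel itself relies on) */
static unsigned long cpu_scale[NR_CPUS_SKETCH] = {
	[0 ... NR_CPUS_SKETCH - 1] = SCHED_CAPACITY_SCALE,
};

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		printf("cpu%d capacity = %lu\n", cpu, cpu_scale[cpu]);
	return 0;
}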
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
new file mode 100644
index 000000000000..cba1ec899a69
--- /dev/null
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -0,0 +1,316 @@
+/* ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+#define PROC_INFO							\
+	. = ALIGN(4);							\
+	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
+	*(.proc.info.init)						\
+	VMLINUX_SYMBOL(__proc_info_end) = .;
+
+#define IDMAP_TEXT							\
+	ALIGN_FUNCTION();						\
+	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
+	*(.idmap.text)							\
+	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
+	*(.hyp.idmap.text)						\
+	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x)		x
+#else
+#define ARM_CPU_DISCARD(x)	x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+	defined(CONFIG_GENERIC_BUG)
+#define ARM_EXIT_KEEP(x)	x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)	x
+#endif
+
+OUTPUT_ARCH(arm)
+ENTRY(stext)
+
+#ifndef __ARMEB__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
+
+SECTIONS
+{
+	/*
+	 * XXX: The linker does not define how output sections are
+	 * assigned to input sections when there are multiple statements
+	 * matching the same input section name.  There is no documented
+	 * order of matching.
+	 *
+	 * unwind exit sections must be discarded before the rest of the
+	 * unwind sections get included.
+	 */
+	/DISCARD/ : {
+		*(.ARM.exidx.exit.text)
+		*(.ARM.extab.exit.text)
+		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
+		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+		ARM_EXIT_DISCARD(EXIT_TEXT)
+		ARM_EXIT_DISCARD(EXIT_DATA)
+		EXIT_CALL
+#ifndef CONFIG_MMU
+		*(.text.fixup)
+		*(__ex_table)
+#endif
+#ifndef CONFIG_SMP_ON_UP
+		*(.alt.smp.init)
+#endif
+		*(.discard)
+		*(.discard.*)
+	}
+
+	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+	_xiprom = .;			/* XIP ROM area to be mapped */
+
+	.head.text : {
+		_text = .;
+		HEAD_TEXT
+	}
+
+	.text : {			/* Real text segment */
+		_stext = .;		/* Text and read-only data */
+			IDMAP_TEXT
+			__exception_text_start = .;
+			*(.exception.text)
+			__exception_text_end = .;
+			IRQENTRY_TEXT
+			TEXT_TEXT
+			SCHED_TEXT
+			LOCK_TEXT
+			KPROBES_TEXT
+			*(.gnu.warning)
+			*(.glue_7)
+			*(.glue_7t)
+		. = ALIGN(4);
+		*(.got)			/* Global offset table */
+			ARM_CPU_KEEP(PROC_INFO)
+	}
+
+	RO_DATA(PAGE_SIZE)
+
+	. = ALIGN(4);
+	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+		__start___ex_table = .;
+#ifdef CONFIG_MMU
+		*(__ex_table)
+#endif
+		__stop___ex_table = .;
+	}
+
+#ifdef CONFIG_ARM_UNWIND
+	/*
+	 * Stack unwinding tables
+	 */
+	. = ALIGN(8);
+	.ARM.unwind_idx : {
+		__start_unwind_idx = .;
+		*(.ARM.exidx*)
+		__stop_unwind_idx = .;
+	}
+	.ARM.unwind_tab : {
+		__start_unwind_tab = .;
+		*(.ARM.extab*)
+		__stop_unwind_tab = .;
+	}
+#endif
+
+	NOTES
+
+	_etext = .;			/* End of text and rodata section */
+
+	/*
+	 * The vectors and stubs are relocatable code, and the
+	 * only thing that matters is their relative offsets
+	 */
+	__vectors_start = .;
+	.vectors 0xffff0000 : AT(__vectors_start) {
+		*(.vectors)
+	}
+	. = __vectors_start + SIZEOF(.vectors);
+	__vectors_end = .;
+
+	__stubs_start = .;
+	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
+		*(.stubs)
+	}
+	. = __stubs_start + SIZEOF(.stubs);
+	__stubs_end = .;
+
+	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
+	INIT_TEXT_SECTION(8)
+	.exit.text : {
+		ARM_EXIT_KEEP(EXIT_TEXT)
+	}
+	.init.proc.info : {
+		ARM_CPU_DISCARD(PROC_INFO)
+	}
+	.init.arch.info : {
+		__arch_info_begin = .;
+		*(.arch.info.init)
+		__arch_info_end = .;
+	}
+	.init.tagtable : {
+		__tagtable_begin = .;
+		*(.taglist.init)
+		__tagtable_end = .;
+	}
+#ifdef CONFIG_SMP_ON_UP
+	.init.smpalt : {
+		__smpalt_begin = .;
+		*(.alt.smp.init)
+		__smpalt_end = .;
+	}
+#endif
+	.init.pv_table : {
+		__pv_table_begin = .;
+		*(.pv_table)
+		__pv_table_end = .;
+	}
+	.init.data : {
+		INIT_SETUP(16)
+		INIT_CALLS
+		CON_INITCALL
+		SECURITY_INITCALL
+		INIT_RAM_FS
+	}
+
+#ifdef CONFIG_SMP
+	PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
+
+	_exiprom = .;			/* End of XIP ROM area */
+	__data_loc = ALIGN(4);		/* location in binary */
+	. = PAGE_OFFSET + TEXT_OFFSET;
+
+	.data : AT(__data_loc) {
+		_data = .;		/* address in memory */
+		_sdata = .;
+
+		/*
+		 * first, the init task union, aligned
+		 * to an 8192 byte boundary.
+		 */
+		INIT_TASK_DATA(THREAD_SIZE)
+
+		. = ALIGN(PAGE_SIZE);
+		__init_begin = .;
+		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
+		. = ALIGN(PAGE_SIZE);
+		__init_end = .;
+
+		NOSAVE_DATA
+		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+		READ_MOSTLY_DATA(L1_CACHE_BYTES)
+
+		/*
+		 * and the usual data section
+		 */
+		DATA_DATA
+		CONSTRUCTORS
+
+		_edata = .;
+	}
+	_edata_loc = __data_loc + SIZEOF(.data);
+
+#ifdef CONFIG_HAVE_TCM
+	/*
+	 * We align everything to a page boundary so we can
+	 * free it after init has commenced and TCM contents have
+	 * been copied to its destination.
+	 */
+	.tcm_start : {
+		. = ALIGN(PAGE_SIZE);
+		__tcm_start = .;
+		__itcm_start = .;
+	}
+
+	/*
+	 * Link these to the ITCM RAM
+	 * Put VMA to the TCM address and LMA to the common RAM
+	 * and we'll upload the contents from RAM to TCM and free
+	 * the used RAM after that.
+	 */
+	.text_itcm ITCM_OFFSET : AT(__itcm_start)
+	{
+		__sitcm_text = .;
+		*(.tcm.text)
+		*(.tcm.rodata)
+		. = ALIGN(4);
+		__eitcm_text = .;
+	}
+
+	/*
+	 * Reset the dot pointer, this is needed to create the
+	 * relative __dtcm_start below (to be used as extern in code).
+	 */
+	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
+
+	.dtcm_start : {
+		__dtcm_start = .;
+	}
+
+	/* TODO: add remainder of ITCM as well, that can be used for data! */
+	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
+	{
+		. = ALIGN(4);
+		__sdtcm_data = .;
+		*(.tcm.data)
+		. = ALIGN(4);
+		__edtcm_data = .;
+	}
+
+	/* Reset the dot pointer or the linker gets confused */
+	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+	/* End marker for freeing TCM copy in linked object */
+	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+		. = ALIGN(PAGE_SIZE);
+		__tcm_end = .;
+	}
+#endif
+
+	BSS_SECTION(0, 0, 0)
+	_end = .;
+
+	STABS_DEBUG
+}
+
+/*
+ * These must never be empty
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well)
+ */
+ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
+/*
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
+ * The above comment applies as well.
+ */
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+	"HYP init code too big or misaligned")
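A note on the layout the new XIP script establishes: everything up to _exiprom is linked and executed in ROM, while .data gets a VMA in RAM (PAGE_OFFSET + TEXT_OFFSET) but an LMA of __data_loc inside the ROM image, so early boot code must copy it out before any C code runs. A toy simulation of that LMA-to-VMA copy; in the real kernel the copy happens in early assembly using the linker-provided symbols __data_loc, _sdata and _edata, and the arrays below merely stand in for ROM and RAM.

/* xip_data_copy_sketch.c -- the LMA/VMA split, simulated. */
#include <stdio.h>
#include <string.h>

static const char rom_image_data[] = "initialized-data";	/* __data_loc (LMA) */
static char ram_data[sizeof(rom_image_data)];			/* _sdata (VMA) */

int main(void)
{
	/* equivalent of: memcpy(_sdata, __data_loc, _edata - _sdata) */
	memcpy(ram_data, rom_image_data, sizeof(ram_data));
	printf(".data after ROM->RAM copy: \"%s\"\n", ram_data);
	return 0;
}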
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8b60fde5ce48..1fab979daeaf 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -3,14 +3,16 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
+#ifdef CONFIG_XIP_KERNEL
+#include "vmlinux-xip.lds.S"
+#else
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
-#ifdef CONFIG_ARM_KERNMEM_PERMS
 #include <asm/pgtable.h>
-#endif
 
 #define PROC_INFO							\
 	. = ALIGN(4);							\
@@ -18,6 +20,11 @@
 	*(.proc.info.init)						\
 	VMLINUX_SYMBOL(__proc_info_end) = .;
 
+#define HYPERVISOR_TEXT							\
+	VMLINUX_SYMBOL(__hyp_text_start) = .;				\
+	*(.hyp.text)							\
+	VMLINUX_SYMBOL(__hyp_text_end) = .;
+
 #define IDMAP_TEXT							\
 	ALIGN_FUNCTION();						\
 	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
@@ -84,17 +91,13 @@ SECTIONS
 		*(.discard.*)
 	}
 
-#ifdef CONFIG_XIP_KERNEL
-	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
-#else
 	. = PAGE_OFFSET + TEXT_OFFSET;
-#endif
 
 	.head.text : {
 		_text = .;
 		HEAD_TEXT
 	}
 
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 	. = ALIGN(1<<SECTION_SHIFT);
 #endif
 
@@ -108,6 +111,7 @@ SECTIONS
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT
+			HYPERVISOR_TEXT
 			KPROBES_TEXT
 			*(.gnu.warning)
 			*(.glue_7)
@@ -117,7 +121,7 @@ SECTIONS
 			ARM_CPU_KEEP(PROC_INFO)
 	}
 
-#ifdef CONFIG_DEBUG_RODATA
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
 	. = ALIGN(1<<SECTION_SHIFT);
 #endif
 	RO_DATA(PAGE_SIZE)
@@ -152,32 +156,33 @@ SECTIONS
 
 	_etext = .;			/* End of text and rodata section */
 
-#ifndef CONFIG_XIP_KERNEL
-# ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 	. = ALIGN(1<<SECTION_SHIFT);
-# else
+#else
 	. = ALIGN(PAGE_SIZE);
-# endif
-	__init_begin = .;
 #endif
+	__init_begin = .;
 
 	/*
 	 * The vectors and stubs are relocatable code, and the
 	 * only thing that matters is their relative offsets
 	 */
 	__vectors_start = .;
-	.vectors 0 : AT(__vectors_start) {
+	.vectors 0xffff0000 : AT(__vectors_start) {
 		*(.vectors)
 	}
 	. = __vectors_start + SIZEOF(.vectors);
 	__vectors_end = .;
 
 	__stubs_start = .;
-	.stubs 0x1000 : AT(__stubs_start) {
+	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
 		*(.stubs)
 	}
 	. = __stubs_start + SIZEOF(.stubs);
 	__stubs_end = .;
 
+	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
 	INIT_TEXT_SECTION(8)
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
@@ -208,37 +213,28 @@ SECTIONS
 		__pv_table_end = .;
 	}
 	.init.data : {
-#ifndef CONFIG_XIP_KERNEL
 		INIT_DATA
-#endif
 		INIT_SETUP(16)
 		INIT_CALLS
 		CON_INITCALL
 		SECURITY_INITCALL
 		INIT_RAM_FS
 	}
-#ifndef CONFIG_XIP_KERNEL
 	.exit.data : {
 		ARM_EXIT_KEEP(EXIT_DATA)
 	}
-#endif
 
 #ifdef CONFIG_SMP
 	PERCPU_SECTION(L1_CACHE_BYTES)
 #endif
 
-#ifdef CONFIG_XIP_KERNEL
-	__data_loc = ALIGN(4);		/* location in binary */
-	. = PAGE_OFFSET + TEXT_OFFSET;
-#else
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 	. = ALIGN(1<<SECTION_SHIFT);
 #else
 	. = ALIGN(THREAD_SIZE);
 #endif
 	__init_end = .;
 	__data_loc = .;
-#endif
 
 	.data : AT(__data_loc) {
 		_data = .;		/* address in memory */
@@ -250,15 +246,6 @@ SECTIONS
 		 */
 		INIT_TASK_DATA(THREAD_SIZE)
 
-#ifdef CONFIG_XIP_KERNEL
-		. = ALIGN(PAGE_SIZE);
-		__init_begin = .;
-		INIT_DATA
-		ARM_EXIT_KEEP(EXIT_DATA)
-		. = ALIGN(PAGE_SIZE);
-		__init_end = .;
-#endif
-
 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
@@ -336,6 +323,15 @@ SECTIONS
 	STABS_DEBUG
 }
 
+#ifdef CONFIG_DEBUG_RODATA
+/*
+ * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
+ * be the first section-aligned location after __start_rodata. Otherwise,
+ * it will be equal to __start_rodata.
+ */
+__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
+#endif
+
 /*
  * These must never be empty
  * If you have to comment these two assert statements out, your
@@ -351,3 +347,5 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
  */
 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
 	"HYP init code too big or misaligned")
+
+#endif /* CONFIG_XIP_KERNEL */
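Finally, the new __start_rodata_section_aligned symbol relies on the linker's ALIGN(exp, align) rounding __start_rodata up to a section boundary. The equivalent round-up in C, assuming a SECTION_SHIFT of 20 (1 MiB sections on classic, non-LPAE ARM page tables; treat the concrete values as illustrative):

/* align_sketch.c -- ALIGN(__start_rodata, 1 << SECTION_SHIFT) in C. */
#include <stdio.h>

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1ul << SECTION_SHIFT)

static unsigned long align_up(unsigned long addr, unsigned long align)
{
	/* round up to the next multiple; align must be a power of two */
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long start_rodata = 0xc0345678ul;	/* made-up address */

	printf("%#lx aligned to %luK -> %#lx\n", start_rodata,
	       SECTION_SIZE >> 10, align_up(start_rodata, SECTION_SIZE));
	return 0;
}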