Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/include/asm/asmmacro.h  | 12
-rw-r--r-- | arch/ia64/include/asm/cache.h     |  2
-rw-r--r-- | arch/ia64/include/asm/percpu.h    |  2
-rw-r--r-- | arch/ia64/kernel/Makefile.gate    |  2
-rw-r--r-- | arch/ia64/kernel/gate-data.S      |  2
-rw-r--r-- | arch/ia64/kernel/gate.S           |  8
-rw-r--r-- | arch/ia64/kernel/gate.lds.S       | 10
-rw-r--r-- | arch/ia64/kernel/init_task.c      |  2
-rw-r--r-- | arch/ia64/kernel/ivt.S            |  2
-rw-r--r-- | arch/ia64/kernel/minstate.h       |  4
-rw-r--r-- | arch/ia64/kernel/paravirtentry.S  |  2
-rw-r--r-- | arch/ia64/kernel/vmlinux.lds.S    | 28
-rw-r--r-- | arch/ia64/kvm/kvm-ia64.c          |  1
-rw-r--r-- | arch/ia64/kvm/vmm_ivt.S           |  2
-rw-r--r-- | arch/ia64/scripts/unwcheck.py     |  2
-rw-r--r-- | arch/ia64/xen/gate-data.S         |  2
-rw-r--r-- | arch/ia64/xen/xensetup.S          |  2
17 files changed, 43 insertions, 42 deletions
diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h
index c1642fd64029..3ab6d75aa3db 100644
--- a/arch/ia64/include/asm/asmmacro.h
+++ b/arch/ia64/include/asm/asmmacro.h
@@ -70,12 +70,12 @@ name:
  * path (ivt.S - TLB miss processing) or in places where it might not be
  * safe to use a "tpa" instruction (mca_asm.S - error recovery).
  */
-	.section ".data.patch.vtop", "a"	// declare section & section attributes
+	.section ".data..patch.vtop", "a"	// declare section & section attributes
 	.previous
 
 #define	LOAD_PHYSICAL(pr, reg, obj)		\
 [1:](pr)movl reg = obj;				\
-	.xdata4 ".data.patch.vtop", 1b-.
+	.xdata4 ".data..patch.vtop", 1b-.
 
 /*
  * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
@@ -84,11 +84,11 @@ name:
 #define DO_MCKINLEY_E9_WORKAROUND
 
 #ifdef DO_MCKINLEY_E9_WORKAROUND
-	.section ".data.patch.mckinley_e9", "a"
+	.section ".data..patch.mckinley_e9", "a"
 	.previous
 /* workaround for Itanium 2 Errata 9: */
 # define FSYS_RETURN					\
-	.xdata4 ".data.patch.mckinley_e9", 1f-.;	\
+	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
 1:{ .mib;						\
 	nop.m 0;					\
 	mov r16=ar.pfs;					\
@@ -107,11 +107,11 @@ name:
  * If physical stack register size is different from DEF_NUM_STACK_REG,
  * dynamically patch the kernel for correct size.
  */
-	.section ".data.patch.phys_stack_reg", "a"
+	.section ".data..patch.phys_stack_reg", "a"
 	.previous
 #define LOAD_PHYS_STACK_REG_SIZE(reg)			\
 [1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
-	.xdata4 ".data.patch.phys_stack_reg", 1b-.
+	.xdata4 ".data..patch.phys_stack_reg", 1b-.
 
 /*
  * Up until early 2004, use of .align within a function caused bad unwind info.
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index e7482bd628ff..988254a7d349 100644
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -24,6 +24,6 @@
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 1bd408265694..14aa1c58912b 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -31,7 +31,7 @@ extern void *per_cpu_init(void);
 
 #endif	/* SMP */
 
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
 
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index ab9b03a9adcc..ceeffc509764 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -21,7 +21,7 @@ GATECFLAGS_gate-syms.o = -r
 $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
 	$(call if_changed,gate)
 
-# gate-data.o contains the gate DSO image as data in section .data.gate.
+# gate-data.o contains the gate DSO image as data in section .data..gate.
 # We must build gate.so before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S
index 258c0a3238fb..b3ef1c72e132 100644
--- a/arch/ia64/kernel/gate-data.S
+++ b/arch/ia64/kernel/gate-data.S
@@ -1,3 +1,3 @@
-	.section .data.gate, "aw"
+	.section .data..gate, "aw"
 
 	.incbin "arch/ia64/kernel/gate.so"
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index cf5e0a105e16..245d3e1ec7e1 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -21,18 +21,18 @@
  * to targets outside the shared object) and to avoid multi-phase kernel builds, we
  * simply create minimalistic "patch lists" in special ELF sections.
  */
-	.section ".data.patch.fsyscall_table", "a"
+	.section ".data..patch.fsyscall_table", "a"
 	.previous
 #define LOAD_FSYSCALL_TABLE(reg)			\
 [1:]	movl reg=0;					\
-	.xdata4 ".data.patch.fsyscall_table", 1b-.
+	.xdata4 ".data..patch.fsyscall_table", 1b-.
 
-	.section ".data.patch.brl_fsys_bubble_down", "a"
+	.section ".data..patch.brl_fsys_bubble_down", "a"
 	.previous
 #define BRL_COND_FSYS_BUBBLE_DOWN(pr)			\
 [1:](pr)brl.cond.sptk 0;				\
 	;;						\
-	.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+	.xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.
 
 GLOBAL_ENTRY(__kernel_syscall_via_break)
 	.prologue
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 88c64ed47c36..d32b0855110a 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -33,21 +33,21 @@ SECTIONS
 	 */
 	. = GATE_ADDR + 0x600;
 
-	.data.patch		: {
+	.data..patch		: {
 		__paravirt_start_gate_mckinley_e9_patchlist = .;
-		*(.data.patch.mckinley_e9)
+		*(.data..patch.mckinley_e9)
 		__paravirt_end_gate_mckinley_e9_patchlist = .;
 
 		__paravirt_start_gate_vtop_patchlist = .;
-		*(.data.patch.vtop)
+		*(.data..patch.vtop)
 		__paravirt_end_gate_vtop_patchlist = .;
 
 		__paravirt_start_gate_fsyscall_patchlist = .;
-		*(.data.patch.fsyscall_table)
+		*(.data..patch.fsyscall_table)
 		__paravirt_end_gate_fsyscall_patchlist = .;
 
 		__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
-		*(.data.patch.brl_fsys_bubble_down)
+		*(.data..patch.brl_fsys_bubble_down)
 		__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
 	} :readable
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index e253ab8fcbc8..f9efe9739d3f 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * Initial task structure.
  *
  * We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data.init_task" section...
+ * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info	init_task_mem.s.thread_info
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 179fd122e837..d93e396bf599 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -82,7 +82,7 @@
 	mov r19=n;;			/* prepare to save predicates */		\
 	br.sptk.many dispatch_to_fault_handler
 
-	.section .text.ivt,"ax"
+	.section .text..ivt,"ax"
 
 	.align 32768	// align on 32KB boundary
 	.global ia64_ivt
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 292e214a3b84..d56753a11636 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -16,7 +16,7 @@
 #define ACCOUNT_SYS_ENTER
 #endif
 
-.section ".data.patch.rse", "a"
+.section ".data..patch.rse", "a"
 .previous
 /*
@@ -215,7 +215,7 @@
 (pUStk)	extr.u r17=r18,3,6;			\
 (pUStk)	sub r16=r18,r22;			\
 [1:](pKStk)	br.cond.sptk.many 1f;		\
-	.xdata4 ".data.patch.rse",1b-.		\
+	.xdata4 ".data..patch.rse",1b-.		\
 	;;					\
 	cmp.ge p6,p7 = 33,r17;			\
 	;;					\
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 6158560d7f17..92d880c4d3d1 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -28,7 +28,7 @@
 #include "entry.h"
 
 #define DATA8(sym, init_value)			\
-	.pushsection .data.read_mostly ;	\
+	.pushsection .data..read_mostly ;	\
 	.align 8 ;				\
 	.global sym ;				\
 	sym: ;					\
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 1295ba327f6f..e07218a2577f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -8,7 +8,7 @@
 
 #define IVT_TEXT							\
 		VMLINUX_SYMBOL(__start_ivt_text) = .;			\
-		*(.text.ivt)						\
+		*(.text..ivt)						\
 		VMLINUX_SYMBOL(__end_ivt_text) = .;
 
 OUTPUT_FORMAT("elf64-ia64-little")
@@ -54,8 +54,8 @@ SECTIONS
 	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
 		{ *(.text2) }
 #ifdef CONFIG_SMP
-  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
-		{ *(.text.lock) }
+  .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET)
+		{ *(.text..lock) }
 #endif
 	_etext = .;
 
@@ -75,10 +75,10 @@ SECTIONS
 	  __stop___mca_table = .;
 	}
 
-  .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+  .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
 	{
 	  __start___phys_stack_reg_patchlist = .;
-	  *(.data.patch.phys_stack_reg)
+	  *(.data..patch.phys_stack_reg)
 	  __end___phys_stack_reg_patchlist = .;
 	}
 
@@ -110,24 +110,24 @@ SECTIONS
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
 
-  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+  .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
 	{
 	  __start___vtop_patchlist = .;
-	  *(.data.patch.vtop)
+	  *(.data..patch.vtop)
 	  __end___vtop_patchlist = .;
 	}
 
-  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+  .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
 	{
 	  __start___rse_patchlist = .;
-	  *(.data.patch.rse)
+	  *(.data..patch.rse)
 	  __end___rse_patchlist = .;
 	}
 
-  .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+  .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
 	{
 	  __start___mckinley_e9_bundles = .;
-	  *(.data.patch.mckinley_e9)
+	  *(.data..patch.mckinley_e9)
 	  __end___mckinley_e9_bundles = .;
 	}
 
@@ -175,17 +175,17 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 
-  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
+  .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET)
 	{
 	PAGE_ALIGNED_DATA(PAGE_SIZE)
 	  . = ALIGN(PAGE_SIZE);
 	  __start_gate_section = .;
-	  *(.data.gate)
+	  *(.data..gate)
 	  __stop_gate_section = .;
 #ifdef CONFIG_XEN
 	  . = ALIGN(PAGE_SIZE);
 	  __xen_start_gate_section = .;
-	  *(.data.gate.xen)
+	  *(.data..gate.xen)
 	  __xen_stop_gate_section = .;
 #endif
 	}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d5f4e9161201..21b701374f72 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -144,6 +144,7 @@ int kvm_arch_hardware_enable(void *garbage)
 			VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
 			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
 	if (status != 0) {
+		spin_unlock(&vp_lock);
 		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
 		return -EINVAL;
 	}
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 40920c630649..24018484c6e9 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -104,7 +104,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
 	br.call.sptk.many b6=vmm_panic_handler;
 END(kvm_vmm_panic)
 
-    .section .text.ivt,"ax"
+    .section .text..ivt,"ax"
 
     .align 32768	// align on 32KB boundary
     .global kvm_ia64_ivt
diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py
index c27849889e19..2bfd941ff7c7 100644
--- a/arch/ia64/scripts/unwcheck.py
+++ b/arch/ia64/scripts/unwcheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
 #
 # Usage: unwcheck.py FILE
 #
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
index 7d4830afc91d..6f95b6b32a4e 100644
--- a/arch/ia64/xen/gate-data.S
+++ b/arch/ia64/xen/gate-data.S
@@ -1,3 +1,3 @@
-	.section .data.gate.xen, "aw"
+	.section .data..gate.xen, "aw"
 
 	.incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
index aff8346ea193..b820ed02ab9f 100644
--- a/arch/ia64/xen/xensetup.S
+++ b/arch/ia64/xen/xensetup.S
@@ -14,7 +14,7 @@
 #include <linux/init.h>
 #include <xen/interface/elfnote.h>
 
-	.section .data.read_mostly
+	.section .data..read_mostly
 	.align 8
 	.global xen_domain_type
 xen_domain_type: