Diffstat (limited to 'include')
115 files changed, 1316 insertions, 1579 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h index 20f523954218..6016632d032f 100644 --- a/include/asm-i386/acpi.h +++ b/include/asm-i386/acpi.h @@ -131,21 +131,7 @@ static inline void disable_acpi(void) extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); #ifdef CONFIG_X86_IO_APIC -extern int skip_ioapic_setup; extern int acpi_skip_timer_override; - -static inline void disable_ioapic_setup(void) -{ - skip_ioapic_setup = 1; -} - -static inline int ioapic_setup_disabled(void) -{ - return skip_ioapic_setup; -} - -#else -static inline void disable_ioapic_setup(void) { } #endif static inline void acpi_noirq_set(void) { acpi_noirq = 1; } diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i new file mode 100644 index 000000000000..6c47e3b9484b --- /dev/null +++ b/include/asm-i386/alternative-asm.i @@ -0,0 +1,14 @@ +#include <linux/config.h> + +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +1: lock + .section .smp_locks,"a" + .align 4 + .long 1b + .previous + .endm +#else + .macro LOCK_PREFIX + .endm +#endif diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index 2c1e371cebb6..3a42b7d6fc92 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h @@ -16,20 +16,8 @@ #define APIC_VERBOSE 1 #define APIC_DEBUG 2 -extern int enable_local_apic; extern int apic_verbosity; -static inline void lapic_disable(void) -{ - enable_local_apic = -1; - clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); -} - -static inline void lapic_enable(void) -{ - enable_local_apic = 1; -} - /* * Define the default level of output to be very little * This can be turned up by using apic=verbose for more @@ -42,6 +30,8 @@ static inline void lapic_enable(void) } while (0) +extern void generic_apic_probe(void); + #ifdef CONFIG_X86_LOCAL_APIC /* @@ -117,8 +107,6 @@ extern void enable_APIC_timer(void); extern void enable_NMI_through_LVT0 (void * dummy); -extern int disable_timer_pin_1; - void smp_send_timer_broadcast_ipi(struct pt_regs *regs); void switch_APIC_timer_to_ipi(void *cpumask); void switch_ipi_to_APIC_timer(void *cpumask); diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h index 89b8b82c82b3..5874ef119ffd 100644 --- a/include/asm-i386/desc.h +++ b/include/asm-i386/desc.h @@ -33,50 +33,99 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; } +/* + * This is the ldt that every process will get unless we need + * something other than this. 
+ */ +extern struct desc_struct default_ldt[]; +extern struct desc_struct idt_table[]; +extern void set_intr_gate(unsigned int irq, void * addr); + +static inline void pack_descriptor(__u32 *a, __u32 *b, + unsigned long base, unsigned long limit, unsigned char type, unsigned char flags) +{ + *a = ((base & 0xffff) << 16) | (limit & 0xffff); + *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | + (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20); +} + +static inline void pack_gate(__u32 *a, __u32 *b, + unsigned long base, unsigned short seg, unsigned char type, unsigned char flags) +{ + *a = (seg << 16) | (base & 0xffff); + *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff); +} + +#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */ +#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */ +#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */ +#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */ +#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */ +#define DESCTYPE_DPL3 0x60 /* DPL-3 */ +#define DESCTYPE_S 0x10 /* !system */ + #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) -#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr)) -#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt)) +#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) +#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr)) #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr)) -#define store_tr(tr) __asm__ ("str %0":"=mr" (tr)) -#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt)) +#define store_tr(tr) __asm__ ("str %0":"=m" (tr)) +#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) -/* - * This is the ldt that every process will get unless we need - * something other than this. - */ -extern struct desc_struct default_ldt[]; -extern void set_intr_gate(unsigned int irq, void * addr); +#if TLS_SIZE != 24 +# error update this code. 
+#endif -#define _set_tssldt_desc(n,addr,limit,type) \ -__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ - "movw %w1,2(%2)\n\t" \ - "rorl $16,%1\n\t" \ - "movb %b1,4(%2)\n\t" \ - "movb %4,5(%2)\n\t" \ - "movb $0,6(%2)\n\t" \ - "movb %h1,7(%2)\n\t" \ - "rorl $16,%1" \ - : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type)) - -static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr) +static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { - _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr, - offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89); +#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i] + C(0); C(1); C(2); +#undef C } -#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) +static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) +{ + __u32 *lp = (__u32 *)((char *)dt + entry*8); + *lp = entry_a; + *(lp+1) = entry_b; +} + +#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) +#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) +#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) + +static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) +{ + __u32 a, b; + pack_gate(&a, &b, (unsigned long)addr, seg, type, 0); + write_idt_entry(idt_table, gate, a, b); +} -static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) +static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr) { - _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82); + __u32 a, b; + pack_descriptor(&a, &b, (unsigned long)addr, + offsetof(struct tss_struct, __cacheline_filler) - 1, + DESCTYPE_TSS, 0); + write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); } +static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries) +{ + __u32 a, b; + pack_descriptor(&a, &b, (unsigned long)addr, + entries * sizeof(struct desc_struct) - 1, + DESCTYPE_LDT, 0); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); +} + +#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) + #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) @@ -102,24 +151,6 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) (info)->seg_not_present == 1 && \ (info)->useable == 0 ) -static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b) -{ - __u32 *lp = (__u32 *)((char *)ldt + entry*8); - *lp = entry_a; - *(lp+1) = entry_b; -} - -#if TLS_SIZE != 24 -# error update this code. 
-#endif - -static inline void load_TLS(struct thread_struct *t, unsigned int cpu) -{ -#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i] - C(0); C(1); C(2); -#undef C -} - static inline void clear_LDT(void) { int cpu = get_cpu(); diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h index 2280f6272f80..6d66398a307d 100644 --- a/include/asm-i386/dwarf2.h +++ b/include/asm-i386/dwarf2.h @@ -1,8 +1,6 @@ #ifndef _DWARF2_H #define _DWARF2_H -#include <linux/config.h> - #ifndef __ASSEMBLY__ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif @@ -28,6 +26,13 @@ #define CFI_RESTORE .cfi_restore #define CFI_REMEMBER_STATE .cfi_remember_state #define CFI_RESTORE_STATE .cfi_restore_state +#define CFI_UNDEFINED .cfi_undefined + +#ifdef CONFIG_AS_CFI_SIGNAL_FRAME +#define CFI_SIGNAL_FRAME .cfi_signal_frame +#else +#define CFI_SIGNAL_FRAME +#endif #else @@ -48,6 +53,8 @@ #define CFI_RESTORE ignore #define CFI_REMEMBER_STATE ignore #define CFI_RESTORE_STATE ignore +#define CFI_UNDEFINED ignore +#define CFI_SIGNAL_FRAME ignore #endif diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h index ca82acb8cb1f..f7514fb6e8e4 100644 --- a/include/asm-i386/e820.h +++ b/include/asm-i386/e820.h @@ -18,7 +18,7 @@ #define E820_RAM 1 #define E820_RESERVED 2 -#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ +#define E820_ACPI 3 #define E820_NVS 4 #define HIGH_MEMORY (1024*1024) diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i new file mode 100644 index 000000000000..4d68ddce18b6 --- /dev/null +++ b/include/asm-i386/frame.i @@ -0,0 +1,24 @@ +#include <linux/config.h> +#include <asm/dwarf2.h> + +/* The annotation hides the frame from the unwinder and makes it look + like a ordinary ebp save/restore. This avoids some special cases for + frame pointer later */ +#ifdef CONFIG_FRAME_POINTER + .macro FRAME + pushl %ebp + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebp,0 + movl %esp,%ebp + .endm + .macro ENDFRAME + popl %ebp + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE ebp + .endm +#else + .macro FRAME + .endm + .macro ENDFRAME + .endm +#endif diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h index b3783a32abee..8ffbb0f07457 100644 --- a/include/asm-i386/genapic.h +++ b/include/asm-i386/genapic.h @@ -1,6 +1,8 @@ #ifndef _ASM_GENAPIC_H #define _ASM_GENAPIC_H 1 +#include <asm/mpspec.h> + /* * Generic APIC driver interface. * @@ -63,14 +65,25 @@ struct genapic { unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); - + +#ifdef CONFIG_SMP /* ipi */ void (*send_IPI_mask)(cpumask_t mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); +#endif }; -#define APICFUNC(x) .x = x +#define APICFUNC(x) .x = x, + +/* More functions could be probably marked IPIFUNC and save some space + in UP GENERICARCH kernels, but I don't have the nerve right now + to untangle this mess. 
-AK */ +#ifdef CONFIG_SMP +#define IPIFUNC(x) APICFUNC(x) +#else +#define IPIFUNC(x) +#endif #define APIC_INIT(aname, aprobe) { \ .name = aname, \ @@ -80,33 +93,33 @@ struct genapic { .no_balance_irq = NO_BALANCE_IRQ, \ .ESR_DISABLE = esr_disable, \ .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered), \ - APICFUNC(target_cpus), \ - APICFUNC(check_apicid_used), \ - APICFUNC(check_apicid_present), \ - APICFUNC(init_apic_ldr), \ - APICFUNC(ioapic_phys_id_map), \ - APICFUNC(clustered_apic_check), \ - APICFUNC(multi_timer_check), \ - APICFUNC(apicid_to_node), \ - APICFUNC(cpu_to_logical_apicid), \ - APICFUNC(cpu_present_to_apicid), \ - APICFUNC(apicid_to_cpu_present), \ - APICFUNC(mpc_apic_id), \ - APICFUNC(setup_portio_remap), \ - APICFUNC(check_phys_apicid_present), \ - APICFUNC(mpc_oem_bus_info), \ - APICFUNC(mpc_oem_pci_bus), \ - APICFUNC(mps_oem_check), \ - APICFUNC(get_apic_id), \ + APICFUNC(apic_id_registered) \ + APICFUNC(target_cpus) \ + APICFUNC(check_apicid_used) \ + APICFUNC(check_apicid_present) \ + APICFUNC(init_apic_ldr) \ + APICFUNC(ioapic_phys_id_map) \ + APICFUNC(clustered_apic_check) \ + APICFUNC(multi_timer_check) \ + APICFUNC(apicid_to_node) \ + APICFUNC(cpu_to_logical_apicid) \ + APICFUNC(cpu_present_to_apicid) \ + APICFUNC(apicid_to_cpu_present) \ + APICFUNC(mpc_apic_id) \ + APICFUNC(setup_portio_remap) \ + APICFUNC(check_phys_apicid_present) \ + APICFUNC(mpc_oem_bus_info) \ + APICFUNC(mpc_oem_pci_bus) \ + APICFUNC(mps_oem_check) \ + APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ - APICFUNC(cpu_mask_to_apicid), \ - APICFUNC(acpi_madt_oem_check), \ - APICFUNC(send_IPI_mask), \ - APICFUNC(send_IPI_allbutself), \ - APICFUNC(send_IPI_all), \ - APICFUNC(enable_apic_mode), \ - APICFUNC(phys_pkg_id), \ + APICFUNC(cpu_mask_to_apicid) \ + APICFUNC(acpi_madt_oem_check) \ + IPIFUNC(send_IPI_mask) \ + IPIFUNC(send_IPI_allbutself) \ + IPIFUNC(send_IPI_all) \ + APICFUNC(enable_apic_mode) \ + APICFUNC(phys_pkg_id) \ } extern struct genapic *genapic; diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h index 134ea9cc5283..b52cd60a075b 100644 --- a/include/asm-i386/intel_arch_perfmon.h +++ b/include/asm-i386/intel_arch_perfmon.h @@ -14,6 +14,18 @@ #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ + (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) + +union cpuid10_eax { + struct { + unsigned int version_id:8; + unsigned int num_counters:8; + unsigned int bit_width:8; + unsigned int mask_length:8; + } split; + unsigned int full; +}; #endif /* X86_INTEL_ARCH_PERFMON_H */ diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h index 5092e819b8a2..5d309275a1dc 100644 --- a/include/asm-i386/io_apic.h +++ b/include/asm-i386/io_apic.h @@ -188,6 +188,16 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned /* 1 if "noapic" boot option passed */ extern int skip_ioapic_setup; +static inline void disable_ioapic_setup(void) +{ + skip_ioapic_setup = 1; +} + +static inline int ioapic_setup_disabled(void) +{ + return skip_ioapic_setup; +} + /* * If we use the IO-APIC for IRQ routing, disable automatic * assignment of PCI IRQ's. 
@@ -206,6 +216,7 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq); #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 +static inline void disable_ioapic_setup(void) { } #endif extern int assign_irq_vector(int irq); diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h index 53f0e06672dc..4dfc9f5ed031 100644 --- a/include/asm-i386/kexec.h +++ b/include/asm-i386/kexec.h @@ -1,6 +1,26 @@ #ifndef _I386_KEXEC_H #define _I386_KEXEC_H +#define PA_CONTROL_PAGE 0 +#define VA_CONTROL_PAGE 1 +#define PA_PGD 2 +#define VA_PGD 3 +#define PA_PTE_0 4 +#define VA_PTE_0 5 +#define PA_PTE_1 6 +#define VA_PTE_1 7 +#ifdef CONFIG_X86_PAE +#define PA_PMD_0 8 +#define VA_PMD_0 9 +#define PA_PMD_1 10 +#define VA_PMD_1 11 +#define PAGES_NR 12 +#else +#define PAGES_NR 8 +#endif + +#ifndef __ASSEMBLY__ + #include <asm/fixmap.h> #include <asm/ptrace.h> #include <asm/string.h> @@ -72,5 +92,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs, newregs->eip = (unsigned long)current_text_addr(); } } +asmlinkage NORET_TYPE void +relocate_kernel(unsigned long indirection_page, + unsigned long control_page, + unsigned long start_address, + unsigned int has_pae) ATTRIB_NORET; + +#endif /* __ASSEMBLY__ */ #endif /* _I386_KEXEC_H */ diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h index b5f3f0d0b2bc..26333685a7fb 100644 --- a/include/asm-i386/mach-es7000/mach_apic.h +++ b/include/asm-i386/mach-es7000/mach_apic.h @@ -123,9 +123,13 @@ extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { +#ifdef CONFIG_SMP if (cpu >= NR_CPUS) return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif } static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h index 9fd073286289..a81b05961595 100644 --- a/include/asm-i386/mach-summit/mach_apic.h +++ b/include/asm-i386/mach-summit/mach_apic.h @@ -46,10 +46,12 @@ extern u8 cpu_2_logical_apicid[]; static inline void init_apic_ldr(void) { unsigned long val, id; - int i, count; - u8 lid; + int count = 0; u8 my_id = (u8)hard_smp_processor_id(); u8 my_cluster = (u8)apicid_cluster(my_id); +#ifdef CONFIG_SMP + u8 lid; + int i; /* Create logical APIC IDs by counting CPUs already in cluster. */ for (count = 0, i = NR_CPUS; --i >= 0; ) { @@ -57,6 +59,7 @@ static inline void init_apic_ldr(void) if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) ++count; } +#endif /* We only have a 4 wide bitmap in cluster mode. If a deranged * BIOS puts 5 CPUs in one APIC cluster, we're hosed. 
*/ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); @@ -91,9 +94,13 @@ static inline int apicid_to_node(int logical_apicid) /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { +#ifdef CONFIG_SMP if (cpu >= NR_CPUS) return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; +#else + return logical_smp_processor_id(); +#endif } static inline int cpu_present_to_apicid(int mps_cpu) diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h index 05a538531229..7a17d9e58ad6 100644 --- a/include/asm-i386/mutex.h +++ b/include/asm-i386/mutex.h @@ -30,14 +30,10 @@ do { \ \ __asm__ __volatile__( \ LOCK_PREFIX " decl (%%eax) \n" \ - " js 2f \n" \ + " jns 1f \n" \ + " call "#fail_fn" \n" \ "1: \n" \ \ - LOCK_SECTION_START("") \ - "2: call "#fail_fn" \n" \ - " jmp 1b \n" \ - LOCK_SECTION_END \ - \ :"=a" (dummy) \ : "a" (count) \ : "memory", "ecx", "edx"); \ @@ -86,14 +82,10 @@ do { \ \ __asm__ __volatile__( \ LOCK_PREFIX " incl (%%eax) \n" \ - " jle 2f \n" \ + " jg 1f \n" \ + " call "#fail_fn" \n" \ "1: \n" \ \ - LOCK_SECTION_START("") \ - "2: call "#fail_fn" \n" \ - " jmp 1b \n" \ - LOCK_SECTION_END \ - \ :"=a" (dummy) \ : "a" (count) \ : "memory", "ecx", "edx"); \ diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h index 67d994799999..303bcd4592bb 100644 --- a/include/asm-i386/nmi.h +++ b/include/asm-i386/nmi.h @@ -6,32 +6,29 @@ #include <linux/pm.h> -struct pt_regs; - -typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); - /** - * set_nmi_callback + * do_nmi_callback * - * Set a handler for an NMI. Only one handler may be - * set. Return 1 if the NMI was handled. + * Check to see if a callback exists and execute it. Return 1 + * if the handler exists and was handled successfully. */ -void set_nmi_callback(nmi_callback_t callback); - -/** - * unset_nmi_callback - * - * Remove the handler previously set. 
- */ -void unset_nmi_callback(void); - -extern void setup_apic_nmi_watchdog (void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); +int do_nmi_callback(struct pt_regs *regs, int cpu); + +extern int nmi_watchdog_enabled; +extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); +extern int avail_to_resrv_perfctr_nmi(unsigned int); +extern int reserve_perfctr_nmi(unsigned int); +extern void release_perfctr_nmi(unsigned int); +extern int reserve_evntsel_nmi(unsigned int); +extern void release_evntsel_nmi(unsigned int); + +extern void setup_apic_nmi_watchdog (void *); +extern void stop_apic_nmi_watchdog (void *); extern void disable_timer_nmi_watchdog(void); extern void enable_timer_nmi_watchdog(void); -extern void nmi_watchdog_tick (struct pt_regs * regs); +extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); +extern atomic_t nmi_active; extern unsigned int nmi_watchdog; #define NMI_DEFAULT -1 #define NMI_NONE 0 diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 0dc051a8078b..541b3e234335 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -411,8 +411,6 @@ extern pte_t *lookup_address(unsigned long address); static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} #endif -extern void noexec_setup(const char *str); - #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h index 1910880fcd40..a4a0e5207db5 100644 --- a/include/asm-i386/ptrace.h +++ b/include/asm-i386/ptrace.h @@ -27,6 +27,7 @@ struct pt_regs { #ifdef __KERNEL__ #include <asm/vm86.h> +#include <asm/segment.h> struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); @@ -40,18 +41,14 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro */ static inline int user_mode(struct pt_regs *regs) { - return (regs->xcs & 3) != 0; + return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; } static inline int user_mode_vm(struct pt_regs *regs) { - return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0; + return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; } #define instruction_pointer(regs) ((regs)->eip) -#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) extern unsigned long profile_pc(struct pt_regs *regs); -#else -#define profile_pc(regs) instruction_pointer(regs) -#endif #endif /* __KERNEL__ */ #endif diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h index 87c069ccba08..c3e5db32fa48 100644 --- a/include/asm-i386/rwlock.h +++ b/include/asm-i386/rwlock.h @@ -20,52 +20,6 @@ #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" -#define __build_read_lock_ptr(rw, helper) \ - asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \ - "jns 1f\n" \ - "call " helper "\n\t" \ - "1:\n" \ - ::"a" (rw) : "memory") - -#define __build_read_lock_const(rw, helper) \ - asm volatile(LOCK_PREFIX " subl $1,%0\n\t" \ - "jns 1f\n" \ - "pushl %%eax\n\t" \ - "leal %0,%%eax\n\t" \ - "call " helper "\n\t" \ - "popl %%eax\n\t" \ - "1:\n" \ - :"+m" (*(volatile int *)rw) : : "memory") - -#define __build_read_lock(rw, helper) do { \ - if (__builtin_constant_p(rw)) \ - __build_read_lock_const(rw, helper); \ - else \ - __build_read_lock_ptr(rw, helper); \ - } while (0) - -#define __build_write_lock_ptr(rw, helper) \ - asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR 
",(%0)\n\t" \ - "jz 1f\n" \ - "call " helper "\n\t" \ - "1:\n" \ - ::"a" (rw) : "memory") - -#define __build_write_lock_const(rw, helper) \ - asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ - "jz 1f\n" \ - "pushl %%eax\n\t" \ - "leal %0,%%eax\n\t" \ - "call " helper "\n\t" \ - "popl %%eax\n\t" \ - "1:\n" \ - :"+m" (*(volatile int *)rw) : : "memory") - -#define __build_write_lock(rw, helper) do { \ - if (__builtin_constant_p(rw)) \ - __build_write_lock_const(rw, helper); \ - else \ - __build_write_lock_ptr(rw, helper); \ - } while (0) +/* Code is in asm-i386/spinlock.h */ #endif diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h index 43113f5608eb..bc598d6388e3 100644 --- a/include/asm-i386/rwsem.h +++ b/include/asm-i386/rwsem.h @@ -99,17 +99,9 @@ static inline void __down_read(struct rw_semaphore *sem) __asm__ __volatile__( "# beginning down_read\n\t" LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ - " js 2f\n\t" /* jump if we weren't granted the lock */ + " jns 1f\n" + " call call_rwsem_down_read_failed\n" "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " pushl %%ecx\n\t" - " pushl %%edx\n\t" - " call rwsem_down_read_failed\n\t" - " popl %%edx\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - LOCK_SECTION_END "# ending down_read\n\t" : "+m" (sem->count) : "a" (sem) @@ -151,15 +143,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) "# beginning down_write\n\t" LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ " testl %%edx,%%edx\n\t" /* was the count 0 before? */ - " jnz 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " pushl %%ecx\n\t" - " call rwsem_down_write_failed\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - LOCK_SECTION_END + " jz 1f\n" + " call call_rwsem_down_write_failed\n" + "1:\n" "# ending down_write" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (tmp) @@ -193,17 +179,9 @@ static inline void __up_read(struct rw_semaphore *sem) __asm__ __volatile__( "# beginning __up_read\n\t" LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ - " js 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " decw %%dx\n\t" /* do nothing if still outstanding active readers */ - " jnz 1b\n\t" - " pushl %%ecx\n\t" - " call rwsem_wake\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - LOCK_SECTION_END + " jns 1f\n\t" + " call call_rwsem_wake\n" + "1:\n" "# ending __up_read\n" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (tmp) @@ -219,17 +197,9 @@ static inline void __up_write(struct rw_semaphore *sem) "# beginning __up_write\n\t" " movl %2,%%edx\n\t" LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ - " jnz 2f\n\t" /* jump if the lock is being waited upon */ + " jz 1f\n" + " call call_rwsem_wake\n" "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " decw %%dx\n\t" /* did the active count reduce to 0? 
*/ - " jnz 1b\n\t" /* jump back if not */ - " pushl %%ecx\n\t" - " call rwsem_wake\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - LOCK_SECTION_END "# ending __up_write\n" : "+m" (sem->count) : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) @@ -244,17 +214,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem) __asm__ __volatile__( "# beginning __downgrade_write\n\t" LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ - " js 2f\n\t" /* jump if the lock is being waited upon */ + " jns 1f\n\t" + " call call_rwsem_downgrade_wake\n" "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " pushl %%ecx\n\t" - " pushl %%edx\n\t" - " call rwsem_downgrade_wake\n\t" - " popl %%edx\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - LOCK_SECTION_END "# ending __downgrade_write\n" : "+m" (sem->count) : "a" (sem), "i" (-RWSEM_WAITING_BIAS) diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h index faf995307b9e..b7ab59685ba7 100644 --- a/include/asm-i386/segment.h +++ b/include/asm-i386/segment.h @@ -83,6 +83,11 @@ #define GDT_SIZE (GDT_ENTRIES * 8) +/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ +#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) +/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) + /* Simple and small GDT entries for booting only */ #define GDT_ENTRY_BOOT_CS 2 @@ -112,4 +117,16 @@ */ #define IDT_ENTRIES 256 +/* Bottom two bits of selector give the ring privilege level */ +#define SEGMENT_RPL_MASK 0x3 +/* Bit 2 is table indicator (LDT/GDT) */ +#define SEGMENT_TI_MASK 0x4 + +/* User mode is privilege level 3 */ +#define USER_RPL 0x3 +/* LDT segment has TI set, GDT has it cleared */ +#define SEGMENT_LDT 0x4 +#define SEGMENT_GDT 0x0 + +#define get_kernel_rpl() 0 #endif diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h index d51e800acf29..e63b6a68f04c 100644 --- a/include/asm-i386/semaphore.h +++ b/include/asm-i386/semaphore.h @@ -100,13 +100,10 @@ static inline void down(struct semaphore * sem) __asm__ __volatile__( "# atomic down operation\n\t" LOCK_PREFIX "decl %0\n\t" /* --sem->count */ - "js 2f\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tlea %0,%%eax\n\t" - "call __down_failed\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "jns 2f\n" + "\tlea %0,%%eax\n\t" + "call __down_failed\n" + "2:" :"+m" (sem->count) : :"memory","ax"); @@ -123,15 +120,12 @@ static inline int down_interruptible(struct semaphore * sem) might_sleep(); __asm__ __volatile__( "# atomic interruptible down operation\n\t" + "xorl %0,%0\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ - "js 2f\n\t" - "xorl %0,%0\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tlea %1,%%eax\n\t" - "call __down_failed_interruptible\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "jns 2f\n\t" + "lea %1,%%eax\n\t" + "call __down_failed_interruptible\n" + "2:" :"=a" (result), "+m" (sem->count) : :"memory"); @@ -148,15 +142,12 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" + "xorl %0,%0\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ - "js 2f\n\t" - "xorl %0,%0\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tlea %1,%%eax\n\t" + "jns 2f\n\t" + "lea %1,%%eax\n\t" "call __down_failed_trylock\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "2:\n" :"=a" (result), "+m" (sem->count) : :"memory"); @@ -166,22 +157,16 @@ static inline int down_trylock(struct semaphore * sem) /* * Note! This is subtle. 
We jump to wake people up only if * the semaphore was negative (== somebody was waiting on it). - * The default case (no contention) will result in NO - * jumps for both down() and up(). */ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ - "jle 2f\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tlea %0,%%eax\n\t" - "call __up_wakeup\n\t" - "jmp 1b\n" - LOCK_SECTION_END - ".subsection 0\n" + "jg 1f\n\t" + "lea %0,%%eax\n\t" + "call __up_wakeup\n" + "1:" :"+m" (sem->count) : :"memory","ax"); diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index 142d10e34ade..32ac8c91d5c5 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h @@ -80,17 +80,12 @@ static inline int hard_smp_processor_id(void) return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); } #endif - -static __inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); -} - #endif extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); +extern unsigned int num_processors; + #endif /* !__ASSEMBLY__ */ #else /* CONFIG_SMP */ @@ -100,4 +95,15 @@ extern void __cpu_die(unsigned int cpu); #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_X86_LOCAL_APIC +static __inline int logical_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); +} +#endif +#endif + #endif diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index d1020363c41a..b0b3043f05e1 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -4,8 +4,12 @@ #include <asm/atomic.h> #include <asm/rwlock.h> #include <asm/page.h> +#include <asm/processor.h> #include <linux/compiler.h> +#define CLI_STRING "cli" +#define STI_STRING "sti" + /* * Your basic SMP spinlocks, allowing only a single CPU anywhere * @@ -17,67 +21,64 @@ * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) \ - (*(volatile signed char *)(&(x)->slock) <= 0) - -#define __raw_spin_lock_string \ - "\n1:\t" \ - LOCK_PREFIX " ; decb %0\n\t" \ - "jns 3f\n" \ - "2:\t" \ - "rep;nop\n\t" \ - "cmpb $0,%0\n\t" \ - "jle 2b\n\t" \ - "jmp 1b\n" \ - "3:\n\t" - -/* - * NOTE: there's an irqs-on section here, which normally would have to be - * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use - * __raw_spin_lock_string_flags(). - */ -#define __raw_spin_lock_string_flags \ - "\n1:\t" \ - LOCK_PREFIX " ; decb %0\n\t" \ - "jns 5f\n" \ - "2:\t" \ - "testl $0x200, %1\n\t" \ - "jz 4f\n\t" \ - "sti\n" \ - "3:\t" \ - "rep;nop\n\t" \ - "cmpb $0, %0\n\t" \ - "jle 3b\n\t" \ - "cli\n\t" \ - "jmp 1b\n" \ - "4:\t" \ - "rep;nop\n\t" \ - "cmpb $0, %0\n\t" \ - "jg 1b\n\t" \ - "jmp 4b\n" \ - "5:\n\t" +static inline int __raw_spin_is_locked(raw_spinlock_t *x) +{ + return *(volatile signed char *)(&(x)->slock) <= 0; +} static inline void __raw_spin_lock(raw_spinlock_t *lock) { - asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory"); + asm volatile("\n1:\t" + LOCK_PREFIX " ; decb %0\n\t" + "jns 3f\n" + "2:\t" + "rep;nop\n\t" + "cmpb $0,%0\n\t" + "jle 2b\n\t" + "jmp 1b\n" + "3:\n\t" + : "+m" (lock->slock) : : "memory"); } /* * It is easier for the lock validator if interrupts are not re-enabled * in the middle of a lock-acquire. 
This is a performance feature anyway * so we turn it off: + * + * NOTE: there's an irqs-on section here, which normally would have to be + * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant. */ #ifndef CONFIG_PROVE_LOCKING static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { - asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory"); + asm volatile( + "\n1:\t" + LOCK_PREFIX " ; decb %0\n\t" + "jns 5f\n" + "2:\t" + "testl $0x200, %1\n\t" + "jz 4f\n\t" + STI_STRING "\n" + "3:\t" + "rep;nop\n\t" + "cmpb $0, %0\n\t" + "jle 3b\n\t" + CLI_STRING "\n\t" + "jmp 1b\n" + "4:\t" + "rep;nop\n\t" + "cmpb $0, %0\n\t" + "jg 1b\n\t" + "jmp 4b\n" + "5:\n\t" + : "+m" (lock->slock) : "r" (flags) : "memory"); } #endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { char oldval; - __asm__ __volatile__( + asm volatile( "xchgb %b0,%1" :"=q" (oldval), "+m" (lock->slock) :"0" (0) : "memory"); @@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) -#define __raw_spin_unlock_string \ - "movb $1,%0" \ - :"+m" (lock->slock) : : "memory" - - static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - __raw_spin_unlock_string - ); + asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory"); } #else -#define __raw_spin_unlock_string \ - "xchgb %b0, %1" \ - :"=q" (oldval), "+m" (lock->slock) \ - :"0" (oldval) : "memory" - static inline void __raw_spin_unlock(raw_spinlock_t *lock) { char oldval = 1; - __asm__ __volatile__( - __raw_spin_unlock_string - ); + asm volatile("xchgb %b0, %1" + : "=q" (oldval), "+m" (lock->slock) + : "0" (oldval) : "memory"); } #endif -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +{ + while (__raw_spin_is_locked(lock)) + cpu_relax(); +} /* * Read-write spinlocks, allowing multiple readers @@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) +static inline int __raw_read_can_lock(raw_rwlock_t *x) +{ + return (int)(x)->lock > 0; +} /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +static inline int __raw_write_can_lock(raw_rwlock_t *x) +{ + return (x)->lock == RW_LOCK_BIAS; +} static inline void __raw_read_lock(raw_rwlock_t *rw) { - __build_read_lock(rw, "__read_lock_failed"); + asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" + "jns 1f\n" + "call __read_lock_failed\n\t" + "1:\n" + ::"a" (rw) : "memory"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { - __build_write_lock(rw, "__write_lock_failed"); + asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" + "jz 1f\n" + "call __write_lock_failed\n\t" + "1:\n" + ::"a" (rw) : "memory"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h new file mode 100644 index 000000000000..7d1f6a5cbfca --- /dev/null +++ b/include/asm-i386/stacktrace.h @@ -0,0 +1 @@ +#include <asm-x86_64/stacktrace.h> diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h new file mode 100644 index 000000000000..399bf6026b16 --- /dev/null +++ b/include/asm-i386/therm_throt.h @@ -0,0 +1,9 @@ +#ifndef __ASM_I386_THERM_THROT_H__ +#define __ASM_I386_THERM_THROT_H__ 1 + +#include <asm/atomic.h> + +extern atomic_t therm_throt_en; +int therm_throt_process(int curr); + +#endif /* __ASM_I386_THERM_THROT_H__ */ diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h index d57ca5c540b6..360648b0f2b3 100644 --- a/include/asm-i386/tlbflush.h +++ b/include/asm-i386/tlbflush.h @@ -36,8 +36,6 @@ : "memory"); \ } while (0) -extern unsigned long pgkern_mask; - # define __flush_tlb_all() \ do { \ if (cpu_has_pge) \ @@ -49,7 +47,7 @@ extern unsigned long pgkern_mask; #define cpu_has_invlpg (boot_cpu_data.x86 > 3) #define __flush_tlb_single(addr) \ - __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) + __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") #ifdef CONFIG_X86_INVLPG # define __flush_tlb_one(addr) __flush_tlb_single(addr) diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h index 97b828ce31e0..c13933185c1c 100644 --- a/include/asm-i386/tsc.h +++ b/include/asm-i386/tsc.h @@ -6,7 +6,6 @@ #ifndef _ASM_i386_TSC_H #define _ASM_i386_TSC_H -#include <linux/config.h> #include <asm/processor.h> /* diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index fc1c8ddae149..565d0897b205 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -323,10 +323,11 @@ #define __NR_tee 315 #define __NR_vmsplice 316 #define __NR_move_pages 317 +#define __NR_getcpu 318 #ifdef __KERNEL__ -#define NR_syscalls 318 +#define NR_syscalls 319 /* * user-visible error numbers are in the range -1 - -128: see diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index 4c1a0b968569..5031d693b89d 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h @@ -18,6 +18,7 @@ struct unwind_frame_info { struct pt_regs regs; struct task_struct *task; + unsigned call_frame:1; }; #define UNW_PC(frame) (frame)->regs.eip @@ -28,6 +29,8 @@ struct unwind_frame_info #define FRAME_LINK_OFFSET 0 #define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0) #define STACK_TOP(tsk) ((tsk)->thread.esp0) +#else +#define UNW_FP(frame) ((void)(frame), 0) #endif #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) @@ -42,6 +45,10 @@ struct unwind_frame_info PTREGS_INFO(edi), \ PTREGS_INFO(eip) +#define UNW_DEFAULT_RA(raItem, dataAlign) \ + ((raItem).where == Memory && \ + !((raItem).value * (dataAlign) + 4)) + static inline void 
arch_unw_init_frame_info(struct unwind_frame_info *info, /*const*/ struct pt_regs *regs) { @@ -88,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) #define UNW_PC(frame) ((void)(frame), 0) #define UNW_SP(frame) ((void)(frame), 0) +#define UNW_FP(frame) ((void)(frame), 0) static inline int arch_unw_user_mode(const void *info) { diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h index 85c82bd819f2..d2da61e4c49b 100644 --- a/include/asm-ia64/module.h +++ b/include/asm-ia64/module.h @@ -28,7 +28,8 @@ struct mod_arch_specific { #define Elf_Ehdr Elf64_Ehdr #define MODULE_PROC_FAMILY "ia64" -#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY \ + "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__) #define ARCH_SHF_SMALL SHF_IA_64_SHORT diff --git a/include/asm-mips/Kbuild b/include/asm-mips/Kbuild index c68e1680da01..7897f05e3165 100644 --- a/include/asm-mips/Kbuild +++ b/include/asm-mips/Kbuild @@ -1 +1,3 @@ include include/asm-generic/Kbuild.asm + +header-y += cachectl.h sgidefs.h sysmips.h diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h index 3b745e76f429..78c35ec46362 100644 --- a/include/asm-mips/bootinfo.h +++ b/include/asm-mips/bootinfo.h @@ -112,8 +112,7 @@ * Valid machtype for group GALILEO */ #define MACH_GROUP_GALILEO 11 /* Galileo Eval Boards */ -#define MACH_EV96100 0 /* EV96100 */ -#define MACH_EV64120A 1 /* EV64120A */ +#define MACH_EV64120A 0 /* EV64120A */ /* * Valid machtype for group MOMENCO diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h index 47bc8f6c20d2..36416fdfcf68 100644 --- a/include/asm-mips/cacheflush.h +++ b/include/asm-mips/cacheflush.h @@ -21,7 +21,6 @@ * - flush_cache_range(vma, start, end) flushes a range of pages * - flush_icache_range(start, end) flush a range of instructions * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache - * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache * * MIPS specific flush operations: * @@ -39,7 +38,7 @@ extern void __flush_dcache_page(struct page *page); static inline void flush_dcache_page(struct page *page) { - if (cpu_has_dc_aliases) + if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) __flush_dcache_page(page); } @@ -47,8 +46,13 @@ static inline void flush_dcache_page(struct page *page) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) -extern void (*flush_icache_page)(struct vm_area_struct *vma, +extern void (*__flush_icache_page)(struct vm_area_struct *vma, struct page *page); +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ +} + extern void (*flush_icache_range)(unsigned long start, unsigned long end); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() @@ -60,7 +64,7 @@ static inline void copy_to_user_page(struct vm_area_struct *vma, if (cpu_has_dc_aliases) flush_cache_page(vma, vaddr, page_to_pfn(page)); memcpy(dst, src, len); - flush_icache_page(vma, page); + __flush_icache_page(vma, page); } static inline void copy_from_user_page(struct vm_area_struct *vma, diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h index 787220e6c1fc..00a50ec1c19f 100644 --- a/include/asm-mips/fcntl.h +++ b/include/asm-mips/fcntl.h @@ -25,8 +25,6 @@ #define F_SETOWN 24 /* for sockets. */ #define F_GETOWN 23 /* for sockets. */ -#define F_SETSIG 10 /* for sockets. 
*/ -#define F_GETSIG 11 /* for sockets. */ #ifndef __mips64 #define F_GETLK64 33 /* using 'struct flock64' */ diff --git a/include/asm-mips/galileo-boards/gt96100.h b/include/asm-mips/galileo-boards/gt96100.h deleted file mode 100644 index aabd1b629c19..000000000000 --- a/include/asm-mips/galileo-boards/gt96100.h +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Copyright 2000 MontaVista Software Inc. - * Author: MontaVista Software, Inc. - * stevel@mvista.com or source@mvista.com - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * Register offsets of the MIPS GT96100 Advanced Communication Controller. - */ -#ifndef _GT96100_H -#define _GT96100_H - -/* - * Galileo GT96100 internal register base. - */ -#define MIPS_GT96100_BASE (KSEG1ADDR(0x14000000)) - -#define GT96100_WRITE(ofs, data) \ - *(volatile u32 *)(MIPS_GT96100_BASE+ofs) = cpu_to_le32(data) -#define GT96100_READ(ofs) \ - le32_to_cpu(*(volatile u32 *)(MIPS_GT96100_BASE+ofs)) - -#define GT96100_ETH_IO_SIZE 0x4000 - -/************************************************************************ - * Register offset addresses follow - ************************************************************************/ - -/* CPU Interface Control Registers */ -#define GT96100_CPU_INTERF_CONFIG 0x000000 - -/* Ethernet Ports */ -#define GT96100_ETH_PHY_ADDR_REG 0x080800 -#define GT96100_ETH_SMI_REG 0x080810 -/* - These are offsets to port 0 registers. Add GT96100_ETH_IO_SIZE to - get offsets to port 1 registers. 
-*/ -#define GT96100_ETH_PORT_CONFIG 0x084800 -#define GT96100_ETH_PORT_CONFIG_EXT 0x084808 -#define GT96100_ETH_PORT_COMM 0x084810 -#define GT96100_ETH_PORT_STATUS 0x084818 -#define GT96100_ETH_SER_PARAM 0x084820 -#define GT96100_ETH_HASH_TBL_PTR 0x084828 -#define GT96100_ETH_FLOW_CNTRL_SRC_ADDR_L 0x084830 -#define GT96100_ETH_FLOW_CNTRL_SRC_ADDR_H 0x084838 -#define GT96100_ETH_SDMA_CONFIG 0x084840 -#define GT96100_ETH_SDMA_COMM 0x084848 -#define GT96100_ETH_INT_CAUSE 0x084850 -#define GT96100_ETH_INT_MASK 0x084858 -#define GT96100_ETH_1ST_RX_DESC_PTR0 0x084880 -#define GT96100_ETH_1ST_RX_DESC_PTR1 0x084884 -#define GT96100_ETH_1ST_RX_DESC_PTR2 0x084888 -#define GT96100_ETH_1ST_RX_DESC_PTR3 0x08488C -#define GT96100_ETH_CURR_RX_DESC_PTR0 0x0848A0 -#define GT96100_ETH_CURR_RX_DESC_PTR1 0x0848A4 -#define GT96100_ETH_CURR_RX_DESC_PTR2 0x0848A8 -#define GT96100_ETH_CURR_RX_DESC_PTR3 0x0848AC -#define GT96100_ETH_CURR_TX_DESC_PTR0 0x0848E0 -#define GT96100_ETH_CURR_TX_DESC_PTR1 0x0848E4 -#define GT96100_ETH_MIB_COUNT_BASE 0x085800 - -/* SDMAs */ -#define GT96100_SDMA_GROUP_CONFIG 0x101AF0 -/* SDMA Group 0 */ -#define GT96100_SDMA_G0_CHAN0_CONFIG 0x000900 -#define GT96100_SDMA_G0_CHAN0_COMM 0x000908 -#define GT96100_SDMA_G0_CHAN0_RX_DESC_BASE 0x008900 -#define GT96100_SDMA_G0_CHAN0_CURR_RX_DESC_PTR 0x008910 -#define GT96100_SDMA_G0_CHAN0_TX_DESC_BASE 0x00C900 -#define GT96100_SDMA_G0_CHAN0_CURR_TX_DESC_PTR 0x00C910 -#define GT96100_SDMA_G0_CHAN0_1ST_TX_DESC_PTR 0x00C914 -#define GT96100_SDMA_G0_CHAN1_CONFIG 0x010900 -#define GT96100_SDMA_G0_CHAN1_COMM 0x010908 -#define GT96100_SDMA_G0_CHAN1_RX_DESC_BASE 0x018900 -#define GT96100_SDMA_G0_CHAN1_CURR_RX_DESC_PTR 0x018910 -#define GT96100_SDMA_G0_CHAN1_TX_DESC_BASE 0x01C900 -#define GT96100_SDMA_G0_CHAN1_CURR_TX_DESC_PTR 0x01C910 -#define GT96100_SDMA_G0_CHAN1_1ST_TX_DESC_PTR 0x01C914 -#define GT96100_SDMA_G0_CHAN2_CONFIG 0x020900 -#define GT96100_SDMA_G0_CHAN2_COMM 0x020908 -#define GT96100_SDMA_G0_CHAN2_RX_DESC_BASE 0x028900 -#define GT96100_SDMA_G0_CHAN2_CURR_RX_DESC_PTR 0x028910 -#define GT96100_SDMA_G0_CHAN2_TX_DESC_BASE 0x02C900 -#define GT96100_SDMA_G0_CHAN2_CURR_TX_DESC_PTR 0x02C910 -#define GT96100_SDMA_G0_CHAN2_1ST_TX_DESC_PTR 0x02C914 -#define GT96100_SDMA_G0_CHAN3_CONFIG 0x030900 -#define GT96100_SDMA_G0_CHAN3_COMM 0x030908 -#define GT96100_SDMA_G0_CHAN3_RX_DESC_BASE 0x038900 -#define GT96100_SDMA_G0_CHAN3_CURR_RX_DESC_PTR 0x038910 -#define GT96100_SDMA_G0_CHAN3_TX_DESC_BASE 0x03C900 -#define GT96100_SDMA_G0_CHAN3_CURR_TX_DESC_PTR 0x03C910 -#define GT96100_SDMA_G0_CHAN3_1ST_TX_DESC_PTR 0x03C914 -#define GT96100_SDMA_G0_CHAN4_CONFIG 0x040900 -#define GT96100_SDMA_G0_CHAN4_COMM 0x040908 -#define GT96100_SDMA_G0_CHAN4_RX_DESC_BASE 0x048900 -#define GT96100_SDMA_G0_CHAN4_CURR_RX_DESC_PTR 0x048910 -#define GT96100_SDMA_G0_CHAN4_TX_DESC_BASE 0x04C900 -#define GT96100_SDMA_G0_CHAN4_CURR_TX_DESC_PTR 0x04C910 -#define GT96100_SDMA_G0_CHAN4_1ST_TX_DESC_PTR 0x04C914 -#define GT96100_SDMA_G0_CHAN5_CONFIG 0x050900 -#define GT96100_SDMA_G0_CHAN5_COMM 0x050908 -#define GT96100_SDMA_G0_CHAN5_RX_DESC_BASE 0x058900 -#define GT96100_SDMA_G0_CHAN5_CURR_RX_DESC_PTR 0x058910 -#define GT96100_SDMA_G0_CHAN5_TX_DESC_BASE 0x05C900 -#define GT96100_SDMA_G0_CHAN5_CURR_TX_DESC_PTR 0x05C910 -#define GT96100_SDMA_G0_CHAN5_1ST_TX_DESC_PTR 0x05C914 -#define GT96100_SDMA_G0_CHAN6_CONFIG 0x060900 -#define GT96100_SDMA_G0_CHAN6_COMM 0x060908 -#define GT96100_SDMA_G0_CHAN6_RX_DESC_BASE 0x068900 -#define GT96100_SDMA_G0_CHAN6_CURR_RX_DESC_PTR 0x068910 -#define 
GT96100_SDMA_G0_CHAN6_TX_DESC_BASE 0x06C900 -#define GT96100_SDMA_G0_CHAN6_CURR_TX_DESC_PTR 0x06C910 -#define GT96100_SDMA_G0_CHAN6_1ST_TX_DESC_PTR 0x06C914 -#define GT96100_SDMA_G0_CHAN7_CONFIG 0x070900 -#define GT96100_SDMA_G0_CHAN7_COMM 0x070908 -#define GT96100_SDMA_G0_CHAN7_RX_DESC_BASE 0x078900 -#define GT96100_SDMA_G0_CHAN7_CURR_RX_DESC_PTR 0x078910 -#define GT96100_SDMA_G0_CHAN7_TX_DESC_BASE 0x07C900 -#define GT96100_SDMA_G0_CHAN7_CURR_TX_DESC_PTR 0x07C910 -#define GT96100_SDMA_G0_CHAN7_1ST_TX_DESC_PTR 0x07C914 -/* SDMA Group 1 */ -#define GT96100_SDMA_G1_CHAN0_CONFIG 0x100900 -#define GT96100_SDMA_G1_CHAN0_COMM 0x100908 -#define GT96100_SDMA_G1_CHAN0_RX_DESC_BASE 0x108900 -#define GT96100_SDMA_G1_CHAN0_CURR_RX_DESC_PTR 0x108910 -#define GT96100_SDMA_G1_CHAN0_TX_DESC_BASE 0x10C900 -#define GT96100_SDMA_G1_CHAN0_CURR_TX_DESC_PTR 0x10C910 -#define GT96100_SDMA_G1_CHAN0_1ST_TX_DESC_PTR 0x10C914 -#define GT96100_SDMA_G1_CHAN1_CONFIG 0x110900 -#define GT96100_SDMA_G1_CHAN1_COMM 0x110908 -#define GT96100_SDMA_G1_CHAN1_RX_DESC_BASE 0x118900 -#define GT96100_SDMA_G1_CHAN1_CURR_RX_DESC_PTR 0x118910 -#define GT96100_SDMA_G1_CHAN1_TX_DESC_BASE 0x11C900 -#define GT96100_SDMA_G1_CHAN1_CURR_TX_DESC_PTR 0x11C910 -#define GT96100_SDMA_G1_CHAN1_1ST_TX_DESC_PTR 0x11C914 -#define GT96100_SDMA_G1_CHAN2_CONFIG 0x120900 -#define GT96100_SDMA_G1_CHAN2_COMM 0x120908 -#define GT96100_SDMA_G1_CHAN2_RX_DESC_BASE 0x128900 -#define GT96100_SDMA_G1_CHAN2_CURR_RX_DESC_PTR 0x128910 -#define GT96100_SDMA_G1_CHAN2_TX_DESC_BASE 0x12C900 -#define GT96100_SDMA_G1_CHAN2_CURR_TX_DESC_PTR 0x12C910 -#define GT96100_SDMA_G1_CHAN2_1ST_TX_DESC_PTR 0x12C914 -#define GT96100_SDMA_G1_CHAN3_CONFIG 0x130900 -#define GT96100_SDMA_G1_CHAN3_COMM 0x130908 -#define GT96100_SDMA_G1_CHAN3_RX_DESC_BASE 0x138900 -#define GT96100_SDMA_G1_CHAN3_CURR_RX_DESC_PTR 0x138910 -#define GT96100_SDMA_G1_CHAN3_TX_DESC_BASE 0x13C900 -#define GT96100_SDMA_G1_CHAN3_CURR_TX_DESC_PTR 0x13C910 -#define GT96100_SDMA_G1_CHAN3_1ST_TX_DESC_PTR 0x13C914 -#define GT96100_SDMA_G1_CHAN4_CONFIG 0x140900 -#define GT96100_SDMA_G1_CHAN4_COMM 0x140908 -#define GT96100_SDMA_G1_CHAN4_RX_DESC_BASE 0x148900 -#define GT96100_SDMA_G1_CHAN4_CURR_RX_DESC_PTR 0x148910 -#define GT96100_SDMA_G1_CHAN4_TX_DESC_BASE 0x14C900 -#define GT96100_SDMA_G1_CHAN4_CURR_TX_DESC_PTR 0x14C910 -#define GT96100_SDMA_G1_CHAN4_1ST_TX_DESC_PTR 0x14C914 -#define GT96100_SDMA_G1_CHAN5_CONFIG 0x150900 -#define GT96100_SDMA_G1_CHAN5_COMM 0x150908 -#define GT96100_SDMA_G1_CHAN5_RX_DESC_BASE 0x158900 -#define GT96100_SDMA_G1_CHAN5_CURR_RX_DESC_PTR 0x158910 -#define GT96100_SDMA_G1_CHAN5_TX_DESC_BASE 0x15C900 -#define GT96100_SDMA_G1_CHAN5_CURR_TX_DESC_PTR 0x15C910 -#define GT96100_SDMA_G1_CHAN5_1ST_TX_DESC_PTR 0x15C914 -#define GT96100_SDMA_G1_CHAN6_CONFIG 0x160900 -#define GT96100_SDMA_G1_CHAN6_COMM 0x160908 -#define GT96100_SDMA_G1_CHAN6_RX_DESC_BASE 0x168900 -#define GT96100_SDMA_G1_CHAN6_CURR_RX_DESC_PTR 0x168910 -#define GT96100_SDMA_G1_CHAN6_TX_DESC_BASE 0x16C900 -#define GT96100_SDMA_G1_CHAN6_CURR_TX_DESC_PTR 0x16C910 -#define GT96100_SDMA_G1_CHAN6_1ST_TX_DESC_PTR 0x16C914 -#define GT96100_SDMA_G1_CHAN7_CONFIG 0x170900 -#define GT96100_SDMA_G1_CHAN7_COMM 0x170908 -#define GT96100_SDMA_G1_CHAN7_RX_DESC_BASE 0x178900 -#define GT96100_SDMA_G1_CHAN7_CURR_RX_DESC_PTR 0x178910 -#define GT96100_SDMA_G1_CHAN7_TX_DESC_BASE 0x17C900 -#define GT96100_SDMA_G1_CHAN7_CURR_TX_DESC_PTR 0x17C910 -#define GT96100_SDMA_G1_CHAN7_1ST_TX_DESC_PTR 0x17C914 -/* MPSCs */ -#define GT96100_MPSC0_MAIN_CONFIG_LOW 0x000A00 
-#define GT96100_MPSC0_MAIN_CONFIG_HIGH 0x000A04 -#define GT96100_MPSC0_PROTOCOL_CONFIG 0x000A08 -#define GT96100_MPSC_CHAN0_REG1 0x000A0C -#define GT96100_MPSC_CHAN0_REG2 0x000A10 -#define GT96100_MPSC_CHAN0_REG3 0x000A14 -#define GT96100_MPSC_CHAN0_REG4 0x000A18 -#define GT96100_MPSC_CHAN0_REG5 0x000A1C -#define GT96100_MPSC_CHAN0_REG6 0x000A20 -#define GT96100_MPSC_CHAN0_REG7 0x000A24 -#define GT96100_MPSC_CHAN0_REG8 0x000A28 -#define GT96100_MPSC_CHAN0_REG9 0x000A2C -#define GT96100_MPSC_CHAN0_REG10 0x000A30 -#define GT96100_MPSC_CHAN0_REG11 0x000A34 -#define GT96100_MPSC1_MAIN_CONFIG_LOW 0x008A00 -#define GT96100_MPSC1_MAIN_CONFIG_HIGH 0x008A04 -#define GT96100_MPSC1_PROTOCOL_CONFIG 0x008A08 -#define GT96100_MPSC_CHAN1_REG1 0x008A0C -#define GT96100_MPSC_CHAN1_REG2 0x008A10 -#define GT96100_MPSC_CHAN1_REG3 0x008A14 -#define GT96100_MPSC_CHAN1_REG4 0x008A18 -#define GT96100_MPSC_CHAN1_REG5 0x008A1C -#define GT96100_MPSC_CHAN1_REG6 0x008A20 -#define GT96100_MPSC_CHAN1_REG7 0x008A24 -#define GT96100_MPSC_CHAN1_REG8 0x008A28 -#define GT96100_MPSC_CHAN1_REG9 0x008A2C -#define GT96100_MPSC_CHAN1_REG10 0x008A30 -#define GT96100_MPSC_CHAN1_REG11 0x008A34 -#define GT96100_MPSC2_MAIN_CONFIG_LOW 0x010A00 -#define GT96100_MPSC2_MAIN_CONFIG_HIGH 0x010A04 -#define GT96100_MPSC2_PROTOCOL_CONFIG 0x010A08 -#define GT96100_MPSC_CHAN2_REG1 0x010A0C -#define GT96100_MPSC_CHAN2_REG2 0x010A10 -#define GT96100_MPSC_CHAN2_REG3 0x010A14 -#define GT96100_MPSC_CHAN2_REG4 0x010A18 -#define GT96100_MPSC_CHAN2_REG5 0x010A1C -#define GT96100_MPSC_CHAN2_REG6 0x010A20 -#define GT96100_MPSC_CHAN2_REG7 0x010A24 -#define GT96100_MPSC_CHAN2_REG8 0x010A28 -#define GT96100_MPSC_CHAN2_REG9 0x010A2C -#define GT96100_MPSC_CHAN2_REG10 0x010A30 -#define GT96100_MPSC_CHAN2_REG11 0x010A34 -#define GT96100_MPSC3_MAIN_CONFIG_LOW 0x018A00 -#define GT96100_MPSC3_MAIN_CONFIG_HIGH 0x018A04 -#define GT96100_MPSC3_PROTOCOL_CONFIG 0x018A08 -#define GT96100_MPSC_CHAN3_REG1 0x018A0C -#define GT96100_MPSC_CHAN3_REG2 0x018A10 -#define GT96100_MPSC_CHAN3_REG3 0x018A14 -#define GT96100_MPSC_CHAN3_REG4 0x018A18 -#define GT96100_MPSC_CHAN3_REG5 0x018A1C -#define GT96100_MPSC_CHAN3_REG6 0x018A20 -#define GT96100_MPSC_CHAN3_REG7 0x018A24 -#define GT96100_MPSC_CHAN3_REG8 0x018A28 -#define GT96100_MPSC_CHAN3_REG9 0x018A2C -#define GT96100_MPSC_CHAN3_REG10 0x018A30 -#define GT96100_MPSC_CHAN3_REG11 0x018A34 -#define GT96100_MPSC4_MAIN_CONFIG_LOW 0x020A00 -#define GT96100_MPSC4_MAIN_CONFIG_HIGH 0x020A04 -#define GT96100_MPSC4_PROTOCOL_CONFIG 0x020A08 -#define GT96100_MPSC_CHAN4_REG1 0x020A0C -#define GT96100_MPSC_CHAN4_REG2 0x020A10 -#define GT96100_MPSC_CHAN4_REG3 0x020A14 -#define GT96100_MPSC_CHAN4_REG4 0x020A18 -#define GT96100_MPSC_CHAN4_REG5 0x020A1C -#define GT96100_MPSC_CHAN4_REG6 0x020A20 -#define GT96100_MPSC_CHAN4_REG7 0x020A24 -#define GT96100_MPSC_CHAN4_REG8 0x020A28 -#define GT96100_MPSC_CHAN4_REG9 0x020A2C -#define GT96100_MPSC_CHAN4_REG10 0x020A30 -#define GT96100_MPSC_CHAN4_REG11 0x020A34 -#define GT96100_MPSC5_MAIN_CONFIG_LOW 0x028A00 -#define GT96100_MPSC5_MAIN_CONFIG_HIGH 0x028A04 -#define GT96100_MPSC5_PROTOCOL_CONFIG 0x028A08 -#define GT96100_MPSC_CHAN5_REG1 0x028A0C -#define GT96100_MPSC_CHAN5_REG2 0x028A10 -#define GT96100_MPSC_CHAN5_REG3 0x028A14 -#define GT96100_MPSC_CHAN5_REG4 0x028A18 -#define GT96100_MPSC_CHAN5_REG5 0x028A1C -#define GT96100_MPSC_CHAN5_REG6 0x028A20 -#define GT96100_MPSC_CHAN5_REG7 0x028A24 -#define GT96100_MPSC_CHAN5_REG8 0x028A28 -#define GT96100_MPSC_CHAN5_REG9 0x028A2C -#define GT96100_MPSC_CHAN5_REG10 
0x028A30 -#define GT96100_MPSC_CHAN5_REG11 0x028A34 -#define GT96100_MPSC6_MAIN_CONFIG_LOW 0x030A00 -#define GT96100_MPSC6_MAIN_CONFIG_HIGH 0x030A04 -#define GT96100_MPSC6_PROTOCOL_CONFIG 0x030A08 -#define GT96100_MPSC_CHAN6_REG1 0x030A0C -#define GT96100_MPSC_CHAN6_REG2 0x030A10 -#define GT96100_MPSC_CHAN6_REG3 0x030A14 -#define GT96100_MPSC_CHAN6_REG4 0x030A18 -#define GT96100_MPSC_CHAN6_REG5 0x030A1C -#define GT96100_MPSC_CHAN6_REG6 0x030A20 -#define GT96100_MPSC_CHAN6_REG7 0x030A24 -#define GT96100_MPSC_CHAN6_REG8 0x030A28 -#define GT96100_MPSC_CHAN6_REG9 0x030A2C -#define GT96100_MPSC_CHAN6_REG10 0x030A30 -#define GT96100_MPSC_CHAN6_REG11 0x030A34 -#define GT96100_MPSC7_MAIN_CONFIG_LOW 0x038A00 -#define GT96100_MPSC7_MAIN_CONFIG_HIGH 0x038A04 -#define GT96100_MPSC7_PROTOCOL_CONFIG 0x038A08 -#define GT96100_MPSC_CHAN7_REG1 0x038A0C -#define GT96100_MPSC_CHAN7_REG2 0x038A10 -#define GT96100_MPSC_CHAN7_REG3 0x038A14 -#define GT96100_MPSC_CHAN7_REG4 0x038A18 -#define GT96100_MPSC_CHAN7_REG5 0x038A1C -#define GT96100_MPSC_CHAN7_REG6 0x038A20 -#define GT96100_MPSC_CHAN7_REG7 0x038A24 -#define GT96100_MPSC_CHAN7_REG8 0x038A28 -#define GT96100_MPSC_CHAN7_REG9 0x038A2C -#define GT96100_MPSC_CHAN7_REG10 0x038A30 -#define GT96100_MPSC_CHAN7_REG11 0x038A34 -/* FlexTDMs */ -/* TDPR0 - Transmit Dual Port RAM. block size 0xff */ -#define GT96100_FXTDM0_TDPR0_BLK0_BASE 0x000B00 -#define GT96100_FXTDM0_TDPR0_BLK1_BASE 0x001B00 -#define GT96100_FXTDM0_TDPR0_BLK2_BASE 0x002B00 -#define GT96100_FXTDM0_TDPR0_BLK3_BASE 0x003B00 -/* RDPR0 - Receive Dual Port RAM. block size 0xff */ -#define GT96100_FXTDM0_RDPR0_BLK0_BASE 0x004B00 -#define GT96100_FXTDM0_RDPR0_BLK1_BASE 0x005B00 -#define GT96100_FXTDM0_RDPR0_BLK2_BASE 0x006B00 -#define GT96100_FXTDM0_RDPR0_BLK3_BASE 0x007B00 -#define GT96100_FXTDM0_TX_READ_PTR 0x008B00 -#define GT96100_FXTDM0_RX_READ_PTR 0x008B04 -#define GT96100_FXTDM0_CONFIG 0x008B08 -#define GT96100_FXTDM0_AUX_CHANA_TX 0x008B0C -#define GT96100_FXTDM0_AUX_CHANA_RX 0x008B10 -#define GT96100_FXTDM0_AUX_CHANB_TX 0x008B14 -#define GT96100_FXTDM0_AUX_CHANB_RX 0x008B18 -#define GT96100_FXTDM1_TDPR1_BLK0_BASE 0x010B00 -#define GT96100_FXTDM1_TDPR1_BLK1_BASE 0x011B00 -#define GT96100_FXTDM1_TDPR1_BLK2_BASE 0x012B00 -#define GT96100_FXTDM1_TDPR1_BLK3_BASE 0x013B00 -#define GT96100_FXTDM1_RDPR1_BLK0_BASE 0x014B00 -#define GT96100_FXTDM1_RDPR1_BLK1_BASE 0x015B00 -#define GT96100_FXTDM1_RDPR1_BLK2_BASE 0x016B00 -#define GT96100_FXTDM1_RDPR1_BLK3_BASE 0x017B00 -#define GT96100_FXTDM1_TX_READ_PTR 0x018B00 -#define GT96100_FXTDM1_RX_READ_PTR 0x018B04 -#define GT96100_FXTDM1_CONFIG 0x018B08 -#define GT96100_FXTDM1_AUX_CHANA_TX 0x018B0C -#define GT96100_FXTDM1_AUX_CHANA_RX 0x018B10 -#define GT96100_FLTDM1_AUX_CHANB_TX 0x018B14 -#define GT96100_FLTDM1_AUX_CHANB_RX 0x018B18 -#define GT96100_FLTDM2_TDPR2_BLK0_BASE 0x020B00 -#define GT96100_FLTDM2_TDPR2_BLK1_BASE 0x021B00 -#define GT96100_FLTDM2_TDPR2_BLK2_BASE 0x022B00 -#define GT96100_FLTDM2_TDPR2_BLK3_BASE 0x023B00 -#define GT96100_FLTDM2_RDPR2_BLK0_BASE 0x024B00 -#define GT96100_FLTDM2_RDPR2_BLK1_BASE 0x025B00 -#define GT96100_FLTDM2_RDPR2_BLK2_BASE 0x026B00 -#define GT96100_FLTDM2_RDPR2_BLK3_BASE 0x027B00 -#define GT96100_FLTDM2_TX_READ_PTR 0x028B00 -#define GT96100_FLTDM2_RX_READ_PTR 0x028B04 -#define GT96100_FLTDM2_CONFIG 0x028B08 -#define GT96100_FLTDM2_AUX_CHANA_TX 0x028B0C -#define GT96100_FLTDM2_AUX_CHANA_RX 0x028B10 -#define GT96100_FLTDM2_AUX_CHANB_TX 0x028B14 -#define GT96100_FLTDM2_AUX_CHANB_RX 0x028B18 -#define GT96100_FLTDM3_TDPR3_BLK0_BASE 
0x030B00 -#define GT96100_FLTDM3_TDPR3_BLK1_BASE 0x031B00 -#define GT96100_FLTDM3_TDPR3_BLK2_BASE 0x032B00 -#define GT96100_FLTDM3_TDPR3_BLK3_BASE 0x033B00 -#define GT96100_FXTDM3_RDPR3_BLK0_BASE 0x034B00 -#define GT96100_FXTDM3_RDPR3_BLK1_BASE 0x035B00 -#define GT96100_FXTDM3_RDPR3_BLK2_BASE 0x036B00 -#define GT96100_FXTDM3_RDPR3_BLK3_BASE 0x037B00 -#define GT96100_FXTDM3_TX_READ_PTR 0x038B00 -#define GT96100_FXTDM3_RX_READ_PTR 0x038B04 -#define GT96100_FXTDM3_CONFIG 0x038B08 -#define GT96100_FXTDM3_AUX_CHANA_TX 0x038B0C -#define GT96100_FXTDM3_AUX_CHANA_RX 0x038B10 -#define GT96100_FXTDM3_AUX_CHANB_TX 0x038B14 -#define GT96100_FXTDM3_AUX_CHANB_RX 0x038B18 -/* Baud Rate Generators */ -#define GT96100_BRG0_CONFIG 0x102A00 -#define GT96100_BRG0_BAUD_TUNE 0x102A04 -#define GT96100_BRG1_CONFIG 0x102A08 -#define GT96100_BRG1_BAUD_TUNE 0x102A0C -#define GT96100_BRG2_CONFIG 0x102A10 -#define GT96100_BRG2_BAUD_TUNE 0x102A14 -#define GT96100_BRG3_CONFIG 0x102A18 -#define GT96100_BRG3_BAUD_TUNE 0x102A1C -#define GT96100_BRG4_CONFIG 0x102A20 -#define GT96100_BRG4_BAUD_TUNE 0x102A24 -#define GT96100_BRG5_CONFIG 0x102A28 -#define GT96100_BRG5_BAUD_TUNE 0x102A2C -#define GT96100_BRG6_CONFIG 0x102A30 -#define GT96100_BRG6_BAUD_TUNE 0x102A34 -#define GT96100_BRG7_CONFIG 0x102A38 -#define GT96100_BRG7_BAUD_TUNE 0x102A3C -/* Routing Registers */ -#define GT96100_ROUTE_MAIN 0x101A00 -#define GT96100_ROUTE_RX_CLOCK 0x101A10 -#define GT96100_ROUTE_TX_CLOCK 0x101A20 -/* General Purpose Ports */ -#define GT96100_GPP_CONFIG0 0x100A00 -#define GT96100_GPP_CONFIG1 0x100A04 -#define GT96100_GPP_CONFIG2 0x100A08 -#define GT96100_GPP_CONFIG3 0x100A0C -#define GT96100_GPP_IO0 0x100A20 -#define GT96100_GPP_IO1 0x100A24 -#define GT96100_GPP_IO2 0x100A28 -#define GT96100_GPP_IO3 0x100A2C -#define GT96100_GPP_DATA0 0x100A40 -#define GT96100_GPP_DATA1 0x100A44 -#define GT96100_GPP_DATA2 0x100A48 -#define GT96100_GPP_DATA3 0x100A4C -#define GT96100_GPP_LEVEL0 0x100A60 -#define GT96100_GPP_LEVEL1 0x100A64 -#define GT96100_GPP_LEVEL2 0x100A68 -#define GT96100_GPP_LEVEL3 0x100A6C -/* Watchdog */ -#define GT96100_WD_CONFIG 0x101A80 -#define GT96100_WD_VALUE 0x101A84 -/* Communication Unit Arbiter */ -#define GT96100_COMM_UNIT_ARBTR_CONFIG 0x101AC0 -/* PCI Arbiters */ -#define GT96100_PCI0_ARBTR_CONFIG 0x101AE0 -#define GT96100_PCI1_ARBTR_CONFIG 0x101AE4 -/* CIU Arbiter */ -#define GT96100_CIU_ARBITER_CONFIG 0x101AC0 -/* Interrupt Controller */ -#define GT96100_MAIN_CAUSE 0x000C18 -#define GT96100_INT0_MAIN_MASK 0x000C1C -#define GT96100_INT1_MAIN_MASK 0x000C24 -#define GT96100_HIGH_CAUSE 0x000C98 -#define GT96100_INT0_HIGH_MASK 0x000C9C -#define GT96100_INT1_HIGH_MASK 0x000CA4 -#define GT96100_INT0_SELECT 0x000C70 -#define GT96100_INT1_SELECT 0x000C74 -#define GT96100_SERIAL_CAUSE 0x103A00 -#define GT96100_SERINT0_MASK 0x103A80 -#define GT96100_SERINT1_MASK 0x103A88 - -#endif /* _GT96100_H */ diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h index 25f5e8a4177d..0fe02945feba 100644 --- a/include/asm-mips/hazards.h +++ b/include/asm-mips/hazards.h @@ -12,102 +12,95 @@ #ifdef __ASSEMBLY__ - - .macro _ssnop - sll $0, $0, 1 - .endm - - .macro _ehb - sll $0, $0, 3 - .endm - -/* - * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent - * use of the JTLB for instructions should not occur for 4 cpu cycles and use - * for data translations should not occur for 3 cpu cycles. 
- */ -#ifdef CONFIG_CPU_RM9000 - - .macro mtc0_tlbw_hazard - .set push - .set mips32 - _ssnop; _ssnop; _ssnop; _ssnop - .set pop - .endm - - .macro tlbw_eret_hazard - .set push - .set mips32 - _ssnop; _ssnop; _ssnop; _ssnop - .set pop - .endm - +#define ASMMACRO(name, code...) .macro name; code; .endm #else -/* - * The taken branch will result in a two cycle penalty for the two killed - * instructions on R4000 / R4400. Other processors only have a single cycle - * hazard so this is nice trick to have an optimal code for a range of - * processors. - */ - .macro mtc0_tlbw_hazard - b . + 8 - .endm +#define ASMMACRO(name, code...) \ +__asm__(".macro " #name "; " #code "; .endm"); \ + \ +static inline void name(void) \ +{ \ + __asm__ __volatile__ (#name); \ +} - .macro tlbw_eret_hazard - .endm #endif +ASMMACRO(_ssnop, + sll $0, $0, 1 + ) + +ASMMACRO(_ehb, + sll $0, $0, 3 + ) + /* - * mtc0->mfc0 hazard - * The 24K has a 2 cycle mtc0/mfc0 execution hazard. - * It is a MIPS32R2 processor so ehb will clear the hazard. + * TLB hazards */ +#if defined(CONFIG_CPU_MIPSR2) -#ifdef CONFIG_CPU_MIPSR2 /* - * Use a macro for ehb unless explicit support for MIPSR2 is enabled + * MIPSR2 defines ehb for hazard avoidance */ -#define irq_enable_hazard \ +ASMMACRO(mtc0_tlbw_hazard, + _ehb + ) +ASMMACRO(tlbw_use_hazard, + _ehb + ) +ASMMACRO(tlb_probe_hazard, + _ehb + ) +ASMMACRO(irq_enable_hazard, + ) +ASMMACRO(irq_disable_hazard, _ehb - -#define irq_disable_hazard \ - _ehb - -#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) - + ) +ASMMACRO(back_to_back_c0_hazard, + _ehb + ) /* - * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. + * gcc has a tradition of misscompiling the previous construct using the + * address of a label as argument to inline assembler. Gas otoh has the + * annoying difference between la and dla which are only usable for 32-bit + * rsp. 64-bit code, so can't be used without conditional compilation. + * The alterantive is switching the assembler to 64-bit code which happens + * to work right even for 32-bit code ... */ +#define instruction_hazard() \ +do { \ + unsigned long tmp; \ + \ + __asm__ __volatile__( \ + " .set mips64r2 \n" \ + " dla %0, 1f \n" \ + " jr.hb %0 \n" \ + " .set mips0 \n" \ + "1: \n" \ + : "=r" (tmp)); \ +} while (0) -#define irq_enable_hazard - -#define irq_disable_hazard - -#else +#elif defined(CONFIG_CPU_R10000) /* - * Classic MIPS needs 1 - 3 nops or ssnops + * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. */ -#define irq_enable_hazard -#define irq_disable_hazard \ - _ssnop; _ssnop; _ssnop -#endif - -#else /* __ASSEMBLY__ */ - -__asm__( - " .macro _ssnop \n" - " sll $0, $0, 1 \n" - " .endm \n" - " \n" - " .macro _ehb \n" - " sll $0, $0, 3 \n" - " .endm \n"); +ASMMACRO(mtc0_tlbw_hazard, + ) +ASMMACRO(tlbw_use_hazard, + ) +ASMMACRO(tlb_probe_hazard, + ) +ASMMACRO(irq_enable_hazard, + ) +ASMMACRO(irq_disable_hazard, + ) +ASMMACRO(back_to_back_c0_hazard, + ) +#define instruction_hazard() do { } while (0) -#ifdef CONFIG_CPU_RM9000 +#elif defined(CONFIG_CPU_RM9000) /* * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent @@ -115,176 +108,73 @@ __asm__( * for data translations should not occur for 3 cpu cycles. 
*/ -#define mtc0_tlbw_hazard() \ - __asm__ __volatile__( \ - " .set mips32 \n" \ - " _ssnop \n" \ - " _ssnop \n" \ - " _ssnop \n" \ - " _ssnop \n" \ - " .set mips0 \n") - -#define tlbw_use_hazard() \ - __asm__ __volatile__( \ - " .set mips32 \n" \ - " _ssnop \n" \ - " _ssnop \n" \ - " _ssnop \n" \ - " _ssnop \n" \ - " .set mips0 \n") - -#else - -/* - * Overkill warning ... - */ -#define mtc0_tlbw_hazard() \ - __asm__ __volatile__( \ - " .set noreorder \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " .set reorder \n") - -#define tlbw_use_hazard() \ - __asm__ __volatile__( \ - " .set noreorder \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - " .set reorder \n") - -#endif - -/* - * Interrupt enable/disable hazards - * Some processors have hazards when modifying - * the status register to change the interrupt state - */ - -#ifdef CONFIG_CPU_MIPSR2 - -__asm__(" .macro irq_enable_hazard \n" - " _ehb \n" - " .endm \n" - " \n" - " .macro irq_disable_hazard \n" - " _ehb \n" - " .endm \n"); +ASMMACRO(mtc0_tlbw_hazard, + _ssnop; _ssnop; _ssnop; _ssnop + ) +ASMMACRO(tlbw_use_hazard, + _ssnop; _ssnop; _ssnop; _ssnop + ) +ASMMACRO(tlb_probe_hazard, + _ssnop; _ssnop; _ssnop; _ssnop + ) +ASMMACRO(irq_enable_hazard, + ) +ASMMACRO(irq_disable_hazard, + ) +ASMMACRO(back_to_back_c0_hazard, + ) +#define instruction_hazard() do { } while (0) -#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) +#elif defined(CONFIG_CPU_SB1) /* - * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. + * Mostly like R4000 for historic reasons */ - -__asm__( - " .macro irq_enable_hazard \n" - " .endm \n" - " \n" - " .macro irq_disable_hazard \n" - " .endm \n"); +ASMMACRO(mtc0_tlbw_hazard, + ) +ASMMACRO(tlbw_use_hazard, + ) +ASMMACRO(tlb_probe_hazard, + ) +ASMMACRO(irq_enable_hazard, + ) +ASMMACRO(irq_disable_hazard, + _ssnop; _ssnop; _ssnop + ) +ASMMACRO(back_to_back_c0_hazard, + ) +#define instruction_hazard() do { } while (0) #else /* - * Default for classic MIPS processors. Assume worst case hazards but don't - * care about the irq_enable_hazard - sooner or later the hardware will - * enable it and we don't care when exactly. - */ - -__asm__( - " # \n" - " # There is a hazard but we do not care \n" - " # \n" - " .macro\tirq_enable_hazard \n" - " .endm \n" - " \n" - " .macro\tirq_disable_hazard \n" - " _ssnop \n" - " _ssnop \n" - " _ssnop \n" - " .endm \n"); - -#endif - -#define irq_enable_hazard() \ - __asm__ __volatile__("irq_enable_hazard") -#define irq_disable_hazard() \ - __asm__ __volatile__("irq_disable_hazard") - - -/* - * Back-to-back hazards - + * Finally the catchall case for all other processors including R4000, R4400, + * R4600, R4700, R5000, RM7000, NEC VR41xx etc. * - * What is needed to separate a move to cp0 from a subsequent read from the - * same cp0 register? 
- */ -#ifdef CONFIG_CPU_MIPSR2 - -__asm__(" .macro back_to_back_c0_hazard \n" - " _ehb \n" - " .endm \n"); - -#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \ - defined(CONFIG_CPU_SB1) - -__asm__(" .macro back_to_back_c0_hazard \n" - " .endm \n"); - -#else - -__asm__(" .macro back_to_back_c0_hazard \n" - " .set noreorder \n" - " _ssnop \n" - " _ssnop \n" - " _ssnop \n" - " .set reorder \n" - " .endm"); - -#endif - -#define back_to_back_c0_hazard() \ - __asm__ __volatile__("back_to_back_c0_hazard") - - -/* - * Instruction execution hazard - */ -#ifdef CONFIG_CPU_MIPSR2 -/* - * gcc has a tradition of misscompiling the previous construct using the - * address of a label as argument to inline assembler. Gas otoh has the - * annoying difference between la and dla which are only usable for 32-bit - * rsp. 64-bit code, so can't be used without conditional compilation. - * The alterantive is switching the assembler to 64-bit code which happens - * to work right even for 32-bit code ... + * The taken branch will result in a two cycle penalty for the two killed + * instructions on R4000 / R4400. Other processors only have a single cycle + * hazard so this is nice trick to have an optimal code for a range of + * processors. */ -#define instruction_hazard() \ -do { \ - unsigned long tmp; \ - \ - __asm__ __volatile__( \ - " .set mips64r2 \n" \ - " dla %0, 1f \n" \ - " jr.hb %0 \n" \ - " .set mips0 \n" \ - "1: \n" \ - : "=r" (tmp)); \ -} while (0) - -#else +ASMMACRO(mtc0_tlbw_hazard, + nop + ) +ASMMACRO(tlbw_use_hazard, + nop; nop; nop + ) +ASMMACRO(tlb_probe_hazard, + nop; nop; nop + ) +ASMMACRO(irq_enable_hazard, + ) +ASMMACRO(irq_disable_hazard, + nop; nop; nop + ) +ASMMACRO(back_to_back_c0_hazard, + _ssnop; _ssnop; _ssnop; + ) #define instruction_hazard() do { } while (0) -#endif - -extern void mips_ihb(void); -#endif /* __ASSEMBLY__ */ +#endif #endif /* _ASM_HAZARDS_H */ diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index 896550bad322..d35c61776a02 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h @@ -76,8 +76,4 @@ extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, unsigned long hwmask); #endif /* CONFIG_MIPS_MT_SMTC */ -#ifdef CONFIG_SMP -#define ARCH_HAS_IRQ_PER_CPU -#endif - #endif /* _ASM_IRQ_H */ diff --git a/include/asm-mips/mach-atlas/mc146818rtc.h b/include/asm-mips/mach-atlas/mc146818rtc.h index 397522ea5565..a73a5698420c 100644 --- a/include/asm-mips/mach-atlas/mc146818rtc.h +++ b/include/asm-mips/mach-atlas/mc146818rtc.h @@ -28,10 +28,12 @@ #include <asm/mips-boards/atlas.h> #include <asm/mips-boards/atlasint.h> +#define ARCH_RTC_LOCATION + #define RTC_PORT(x) (ATLAS_RTC_ADR_REG + (x) * 8) #define RTC_IO_EXTENT 0x100 #define RTC_IOMAPPED 0 -#define RTC_IRQ ATLASINT_RTC +#define RTC_IRQ ATLAS_INT_RTC static inline unsigned char CMOS_READ(unsigned long addr) { diff --git a/include/asm-mips/mach-ev96100/mach-gt64120.h b/include/asm-mips/mach-ev96100/mach-gt64120.h deleted file mode 100644 index 0ef1e6c25acf..000000000000 --- a/include/asm-mips/mach-ev96100/mach-gt64120.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * This is a direct copy of the ev96100.h file, with a global - * search and replace. The numbers are the same. - * - * The reason I'm duplicating this is so that the 64120/96100 - * defines won't be confusing in the source code. 
- */ -#ifndef _ASM_GT64120_EV96100_GT64120_DEP_H -#define _ASM_GT64120_EV96100_GT64120_DEP_H - -/* - * GT96100 config space base address - */ -#define GT64120_BASE (KSEG1ADDR(0x14000000)) - -/* - * PCI Bus allocation - * - * (Guessing ...) - */ -#define GT_PCI_MEM_BASE 0x12000000UL -#define GT_PCI_MEM_SIZE 0x02000000UL -#define GT_PCI_IO_BASE 0x10000000UL -#define GT_PCI_IO_SIZE 0x02000000UL -#define GT_ISA_IO_BASE PCI_IO_BASE - -/* - * Duart I/O ports. - */ -#define EV96100_COM1_BASE_ADDR (0xBD000000 + 0x20) -#define EV96100_COM2_BASE_ADDR (0xBD000000 + 0x00) - - -/* - * EV96100 interrupt controller register base. - */ -#define EV96100_ICTRL_REGS_BASE (KSEG1ADDR(0x1f000000)) - -/* - * EV96100 UART register base. - */ -#define EV96100_UART0_REGS_BASE EV96100_COM1_BASE_ADDR -#define EV96100_UART1_REGS_BASE EV96100_COM2_BASE_ADDR -#define EV96100_BASE_BAUD ( 3686400 / 16 ) - -#endif /* _ASM_GT64120_EV96100_GT64120_DEP_H */ diff --git a/include/asm-mips/mach-excite/excite.h b/include/asm-mips/mach-excite/excite.h index 130bd4b8edce..4c29ba44992c 100644 --- a/include/asm-mips/mach-excite/excite.h +++ b/include/asm-mips/mach-excite/excite.h @@ -7,7 +7,7 @@ #define EXCITE_CPU_EXT_CLOCK 100000000 -#if !defined(__ASSEMBLER__) +#if !defined(__ASSEMBLY__) void __init excite_kgdb_init(void); void excite_procfs_init(void); extern unsigned long memsize; diff --git a/include/asm-mips/mach-excite/excite_fpga.h b/include/asm-mips/mach-excite/excite_fpga.h new file mode 100644 index 000000000000..38fcda703a0b --- /dev/null +++ b/include/asm-mips/mach-excite/excite_fpga.h @@ -0,0 +1,80 @@ +#ifndef EXCITE_FPGA_H_INCLUDED +#define EXCITE_FPGA_H_INCLUDED + + +/** + * Adress alignment of the individual FPGA bytes. + * The address arrangement of the individual bytes of the FPGA is two + * byte aligned at the embedded MK2 platform. + */ +#ifdef EXCITE_CCI_FPGA_MK2 +typedef unsigned char excite_cci_fpga_align_t __attribute__ ((aligned(2))); +#else +typedef unsigned char excite_cci_fpga_align_t; +#endif + + +/** + * Size of Dual Ported RAM. + */ +#define EXCITE_DPR_SIZE 263 + + +/** + * Size of Reserved Status Fields in Dual Ported RAM. + */ +#define EXCITE_DPR_STATUS_SIZE 7 + + + +/** + * FPGA. + * Hardware register layout of the FPGA interface. The FPGA must accessed + * byte wise solely. + * @see EXCITE_CCI_DPR_MK2 + */ +typedef struct excite_fpga { + + /** + * Dual Ported RAM. + */ + excite_cci_fpga_align_t dpr[EXCITE_DPR_SIZE]; + + /** + * Status. + */ + excite_cci_fpga_align_t status[EXCITE_DPR_STATUS_SIZE]; + +#ifdef EXCITE_CCI_FPGA_MK2 + /** + * RM9000 Interrupt. + * Write access initiates interrupt at the RM9000 (MIPS) processor of the eXcite. + */ + excite_cci_fpga_align_t rm9k_int; +#else + /** + * MK2 Interrupt. + * Write access initiates interrupt at the ARM processor of the MK2. + */ + excite_cci_fpga_align_t mk2_int; + + excite_cci_fpga_align_t gap[0x1000-0x10f]; + + /** + * IRQ Source/Acknowledge. + */ + excite_cci_fpga_align_t rm9k_irq_src; + + /** + * IRQ Mask. + * Set bits enable the related interrupt. 
+ */ + excite_cci_fpga_align_t rm9k_irq_mask; +#endif + + +} excite_fpga; + + + +#endif /* ndef EXCITE_FPGA_H_INCLUDED */ diff --git a/include/asm-mips/mach-qemu/cpu-feature-overrides.h b/include/asm-mips/mach-qemu/cpu-feature-overrides.h index f4e370e27168..529445dacedb 100644 --- a/include/asm-mips/mach-qemu/cpu-feature-overrides.h +++ b/include/asm-mips/mach-qemu/cpu-feature-overrides.h @@ -20,7 +20,7 @@ #define cpu_has_llsc 1 #define cpu_has_vtag_icache 0 -#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000) +#define cpu_has_dc_aliases 0 #define cpu_has_ic_fills_f_dc 0 #define cpu_has_dsp 0 diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h index fd7ebc54fa90..b15e4ea0b091 100644 --- a/include/asm-mips/mips-boards/atlasint.h +++ b/include/asm-mips/mips-boards/atlasint.h @@ -1,6 +1,7 @@ /* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 1999, 2006 MIPS Technologies, Inc. All rights reserved. + * Authors: Carsten Langgaard <carstenl@mips.com> + * Maciej W. Rozycki <macro@mips.com> * * ######################################################################## * @@ -25,41 +26,88 @@ #ifndef _MIPS_ATLASINT_H #define _MIPS_ATLASINT_H -#define ATLASINT_BASE 1 -#define ATLASINT_UART (ATLASINT_BASE+0) -#define ATLASINT_TIM0 (ATLASINT_BASE+1) -#define ATLASINT_RES2 (ATLASINT_BASE+2) -#define ATLASINT_RES3 (ATLASINT_BASE+3) -#define ATLASINT_RTC (ATLASINT_BASE+4) -#define ATLASINT_COREHI (ATLASINT_BASE+5) -#define ATLASINT_CORELO (ATLASINT_BASE+6) -#define ATLASINT_RES7 (ATLASINT_BASE+7) -#define ATLASINT_PCIA (ATLASINT_BASE+8) -#define ATLASINT_PCIB (ATLASINT_BASE+9) -#define ATLASINT_PCIC (ATLASINT_BASE+10) -#define ATLASINT_PCID (ATLASINT_BASE+11) -#define ATLASINT_ENUM (ATLASINT_BASE+12) -#define ATLASINT_DEG (ATLASINT_BASE+13) -#define ATLASINT_ATXFAIL (ATLASINT_BASE+14) -#define ATLASINT_INTA (ATLASINT_BASE+15) -#define ATLASINT_INTB (ATLASINT_BASE+16) -#define ATLASINT_ETH ATLASINT_INTB -#define ATLASINT_INTC (ATLASINT_BASE+17) -#define ATLASINT_SCSI ATLASINT_INTC -#define ATLASINT_INTD (ATLASINT_BASE+18) -#define ATLASINT_SERR (ATLASINT_BASE+19) -#define ATLASINT_RES20 (ATLASINT_BASE+20) -#define ATLASINT_RES21 (ATLASINT_BASE+21) -#define ATLASINT_RES22 (ATLASINT_BASE+22) -#define ATLASINT_RES23 (ATLASINT_BASE+23) -#define ATLASINT_RES24 (ATLASINT_BASE+24) -#define ATLASINT_RES25 (ATLASINT_BASE+25) -#define ATLASINT_RES26 (ATLASINT_BASE+26) -#define ATLASINT_RES27 (ATLASINT_BASE+27) -#define ATLASINT_RES28 (ATLASINT_BASE+28) -#define ATLASINT_RES29 (ATLASINT_BASE+29) -#define ATLASINT_RES30 (ATLASINT_BASE+30) -#define ATLASINT_RES31 (ATLASINT_BASE+31) -#define ATLASINT_END (ATLASINT_BASE+31) +/* + * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode) + */ +#define MIPSCPU_INT_BASE 0 + +/* CPU interrupt offsets */ +#define MIPSCPU_INT_SW0 0 +#define MIPSCPU_INT_SW1 1 +#define MIPSCPU_INT_MB0 2 +#define MIPSCPU_INT_ATLAS MIPSCPU_INT_MB0 +#define MIPSCPU_INT_MB1 3 +#define MIPSCPU_INT_MB2 4 +#define MIPSCPU_INT_MB3 5 +#define MIPSCPU_INT_MB4 6 +#define MIPSCPU_INT_CPUCTR 7 + +/* + * Interrupts 8..39 are used for Atlas interrupt controller interrupts + */ +#define ATLAS_INT_BASE 8 +#define ATLAS_INT_UART (ATLAS_INT_BASE + 0) +#define ATLAS_INT_TIM0 (ATLAS_INT_BASE + 1) +#define ATLAS_INT_RES2 (ATLAS_INT_BASE + 2) +#define ATLAS_INT_RES3 (ATLAS_INT_BASE + 3) +#define ATLAS_INT_RTC (ATLAS_INT_BASE + 4) +#define ATLAS_INT_COREHI (ATLAS_INT_BASE + 5) +#define 
ATLAS_INT_CORELO (ATLAS_INT_BASE + 6) +#define ATLAS_INT_RES7 (ATLAS_INT_BASE + 7) +#define ATLAS_INT_PCIA (ATLAS_INT_BASE + 8) +#define ATLAS_INT_PCIB (ATLAS_INT_BASE + 9) +#define ATLAS_INT_PCIC (ATLAS_INT_BASE + 10) +#define ATLAS_INT_PCID (ATLAS_INT_BASE + 11) +#define ATLAS_INT_ENUM (ATLAS_INT_BASE + 12) +#define ATLAS_INT_DEG (ATLAS_INT_BASE + 13) +#define ATLAS_INT_ATXFAIL (ATLAS_INT_BASE + 14) +#define ATLAS_INT_INTA (ATLAS_INT_BASE + 15) +#define ATLAS_INT_INTB (ATLAS_INT_BASE + 16) +#define ATLAS_INT_ETH ATLAS_INT_INTB +#define ATLAS_INT_INTC (ATLAS_INT_BASE + 17) +#define ATLAS_INT_SCSI ATLAS_INT_INTC +#define ATLAS_INT_INTD (ATLAS_INT_BASE + 18) +#define ATLAS_INT_SERR (ATLAS_INT_BASE + 19) +#define ATLAS_INT_RES20 (ATLAS_INT_BASE + 20) +#define ATLAS_INT_RES21 (ATLAS_INT_BASE + 21) +#define ATLAS_INT_RES22 (ATLAS_INT_BASE + 22) +#define ATLAS_INT_RES23 (ATLAS_INT_BASE + 23) +#define ATLAS_INT_RES24 (ATLAS_INT_BASE + 24) +#define ATLAS_INT_RES25 (ATLAS_INT_BASE + 25) +#define ATLAS_INT_RES26 (ATLAS_INT_BASE + 26) +#define ATLAS_INT_RES27 (ATLAS_INT_BASE + 27) +#define ATLAS_INT_RES28 (ATLAS_INT_BASE + 28) +#define ATLAS_INT_RES29 (ATLAS_INT_BASE + 29) +#define ATLAS_INT_RES30 (ATLAS_INT_BASE + 30) +#define ATLAS_INT_RES31 (ATLAS_INT_BASE + 31) +#define ATLAS_INT_END (ATLAS_INT_BASE + 31) + +/* + * Interrupts 64..127 are used for Soc-it Classic interrupts + */ +#define MSC01C_INT_BASE 64 + +/* SOC-it Classic interrupt offsets */ +#define MSC01C_INT_TMR 0 +#define MSC01C_INT_PCI 1 + +/* + * Interrupts 64..127 are used for Soc-it EIC interrupts + */ +#define MSC01E_INT_BASE 64 + +/* SOC-it EIC interrupt offsets */ +#define MSC01E_INT_SW0 1 +#define MSC01E_INT_SW1 2 +#define MSC01E_INT_MB0 3 +#define MSC01E_INT_ATLAS MSC01E_INT_MB0 +#define MSC01E_INT_MB1 4 +#define MSC01E_INT_MB2 5 +#define MSC01E_INT_MB3 6 +#define MSC01E_INT_MB4 7 +#define MSC01E_INT_TMR 8 +#define MSC01E_INT_PCI 9 +#define MSC01E_INT_PERFCTR 10 +#define MSC01E_INT_CPUCTR 11 #endif /* !(_MIPS_ATLASINT_H) */ diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h index 18b69de87daa..fe065d6070ca 100644 --- a/include/asm-mips/mmu_context.h +++ b/include/asm-mips/mmu_context.h @@ -262,10 +262,10 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) /* See comments for similar code above */ prevvpe = dvpe(); oldasid = (read_c0_entryhi() & ASID_MASK); - if(smtc_live_asid[mytlb][oldasid]) { - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); - if(smtc_live_asid[mytlb][oldasid] == 0) - smtc_flush_tlb_asid(oldasid); + if (smtc_live_asid[mytlb][oldasid]) { + smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); + if(smtc_live_asid[mytlb][oldasid] == 0) + smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index 219d359861f3..85b258ee7090 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h @@ -34,6 +34,8 @@ #ifndef __ASSEMBLY__ +#include <asm/cpu-features.h> + extern void clear_page(void * page); extern void copy_page(void * to, void * from); @@ -53,7 +55,7 @@ static inline void clear_user_page(void *addr, unsigned long vaddr, extern void (*flush_data_cache_page)(unsigned long addr); clear_page(addr); - if (pages_do_alias((unsigned long) addr, vaddr)) + if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) flush_data_cache_page((unsigned long)addr); } @@ -63,7 +65,8 @@ static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 
extern void (*flush_data_cache_page)(unsigned long addr); copy_page(vto, vfrom); - if (pages_do_alias((unsigned long)vto, vaddr)) + if (!cpu_has_ic_fills_f_dc || + pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) flush_data_cache_page((unsigned long)vto); } @@ -74,15 +77,17 @@ static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, #ifdef CONFIG_CPU_MIPS32 typedef struct { unsigned long pte_low, pte_high; } pte_t; #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) + #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) #else typedef struct { unsigned long long pte; } pte_t; #define pte_val(x) ((x).pte) + #define __pte(x) ((pte_t) { (x) } ) #endif #else typedef struct { unsigned long pte; } pte_t; #define pte_val(x) ((x).pte) -#endif #define __pte(x) ((pte_t) { (x) } ) +#endif /* * For 3-level pagetables we defines these ourselves, for 2-level the diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h index c59a1e21f5b0..d05fb6f38aa7 100644 --- a/include/asm-mips/pgtable-64.h +++ b/include/asm-mips/pgtable-64.h @@ -93,8 +93,12 @@ #define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t)) #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#if PGDIR_SIZE >= TASK_SIZE +#define USER_PTRS_PER_PGD (1) +#else #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) -#define FIRST_USER_ADDRESS 0 +#endif +#define FIRST_USER_ADDRESS 0UL #define VMALLOC_START MAP_BASE #define VMALLOC_END \ diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h index 4113316ee0da..4fb0fc43ffd7 100644 --- a/include/asm-mips/ptrace.h +++ b/include/asm-mips/ptrace.h @@ -10,8 +10,6 @@ #define _ASM_PTRACE_H -#include <asm/isadep.h> - /* 0 - 31 are integer registers, 32 - 63 are fp registers. */ #define FPR_BASE 32 #define PC 64 @@ -73,6 +71,7 @@ struct pt_regs { #ifdef __KERNEL__ #include <linux/linkage.h> +#include <asm/isadep.h> /* * Does the process account for user or for system time? diff --git a/include/asm-mips/serial.h b/include/asm-mips/serial.h index 584bd9c0ab2e..035637c67e7c 100644 --- a/include/asm-mips/serial.h +++ b/include/asm-mips/serial.h @@ -52,9 +52,9 @@ #endif /* - * Both Galileo boards have the same UART mappings. + * Galileo EV64120 evaluation board */ -#if defined (CONFIG_MIPS_EV96100) || defined (CONFIG_MIPS_EV64120) +#ifdef CONFIG_MIPS_EV64120 #include <asm/galileo-boards/ev96100.h> #include <asm/galileo-boards/ev96100int.h> #define EV96100_SERIAL_PORT_DEFNS \ diff --git a/include/asm-mips/sibyte/sb1250_defs.h b/include/asm-mips/sibyte/sb1250_defs.h index 335dbaf1d831..a885491217c1 100644 --- a/include/asm-mips/sibyte/sb1250_defs.h +++ b/include/asm-mips/sibyte/sb1250_defs.h @@ -212,7 +212,7 @@ * Note: you'll need to define uint32_t and uint64_t in your headers. 
*/ -#if !defined(__ASSEMBLER__) +#if !defined(__ASSEMBLY__) #define _SB_MAKE64(x) ((uint64_t)(x)) #define _SB_MAKE32(x) ((uint32_t)(x)) #else @@ -251,9 +251,9 @@ */ -#if defined(__mips64) && !defined(__ASSEMBLER__) +#if defined(__mips64) && !defined(__ASSEMBLY__) #define SBWRITECSR(csr,val) *((volatile uint64_t *) PHYS_TO_K1(csr)) = (val) #define SBREADCSR(csr) (*((volatile uint64_t *) PHYS_TO_K1(csr))) -#endif /* __ASSEMBLER__ */ +#endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-mips/sibyte/sb1250_scd.h b/include/asm-mips/sibyte/sb1250_scd.h index f4178bdcfcb0..7ed0bb611e56 100644 --- a/include/asm-mips/sibyte/sb1250_scd.h +++ b/include/asm-mips/sibyte/sb1250_scd.h @@ -149,7 +149,7 @@ * (For the assembler version, sysrev and dest may be the same register. * Also, it clobbers AT.) */ -#ifdef __ASSEMBLER__ +#ifdef __ASSEMBLY__ #define SYS_SOC_TYPE(dest, sysrev) \ .set push ; \ .set reorder ; \ diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h index 87a1dff95199..8b391a2f0814 100644 --- a/include/asm-mips/signal.h +++ b/include/asm-mips/signal.h @@ -108,17 +108,8 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SIG_BLOCK 1 /* for blocking signals */ #define SIG_UNBLOCK 2 /* for unblocking signals */ #define SIG_SETMASK 3 /* for setting the signal mask */ -#define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility: - set only the low 32 bit of the sigset. */ -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -/* Fake signal functions */ -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include <asm-generic/signal.h> struct sigaction { unsigned int sa_flags; diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index 669b8e349ff2..4c1a1b53aeaf 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h @@ -239,7 +239,51 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) : "memory"); } -#define __raw_read_trylock(lock) generic__raw_read_trylock(lock) +static inline int __raw_read_trylock(raw_rwlock_t *rw) +{ + unsigned int tmp; + int ret; + + if (R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set noreorder # __raw_read_trylock \n" + " li %2, 0 \n" + "1: ll %1, %3 \n" + " bnez %1, 2f \n" + " addu %1, 1 \n" + " sc %1, %0 \n" + " beqzl %1, 1b \n" + " .set reorder \n" +#ifdef CONFIG_SMP + " sync \n" +#endif + " li %2, 1 \n" + "2: \n" + : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) + : "m" (rw->lock) + : "memory"); + } else { + __asm__ __volatile__( + " .set noreorder # __raw_read_trylock \n" + " li %2, 0 \n" + "1: ll %1, %3 \n" + " bnez %1, 2f \n" + " addu %1, 1 \n" + " sc %1, %0 \n" + " beqz %1, 1b \n" + " .set reorder \n" +#ifdef CONFIG_SMP + " sync \n" +#endif + " li %2, 1 \n" + "2: \n" + : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) + : "m" (rw->lock) + : "memory"); + } + + return ret; +} static inline int __raw_write_trylock(raw_rwlock_t *rw) { @@ -283,4 +327,5 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return ret; } + #endif /* _ASM_SPINLOCK_H */ diff --git a/include/asm-mips/timex.h b/include/asm-mips/timex.h index 98aa737b34aa..b80de8e0fbbd 100644 --- a/include/asm-mips/timex.h +++ b/include/asm-mips/timex.h @@ -8,6 +8,8 @@ #ifndef _ASM_TIMEX_H #define _ASM_TIMEX_H +#ifdef __KERNEL__ + #include <asm/mipsregs.h> /* @@ -51,4 +53,6 @@ static inline cycles_t get_cycles (void) return 
read_c0_count(); } +#endif /* __KERNEL__ */ + #endif /* _ASM_TIMEX_H */ diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index 610ccb8a50b3..c39142920fe6 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h @@ -313,7 +313,7 @@ #define __NR_mknodat (__NR_Linux + 290) #define __NR_fchownat (__NR_Linux + 291) #define __NR_futimesat (__NR_Linux + 292) -#define __NR_fstatat (__NR_Linux + 293) +#define __NR_fstatat64 (__NR_Linux + 293) #define __NR_unlinkat (__NR_Linux + 294) #define __NR_renameat (__NR_Linux + 295) #define __NR_linkat (__NR_Linux + 296) @@ -329,16 +329,18 @@ #define __NR_tee (__NR_Linux + 306) #define __NR_vmsplice (__NR_Linux + 307) #define __NR_move_pages (__NR_Linux + 308) +#define __NR_set_robust_list (__NR_Linux + 309) +#define __NR_get_robust_list (__NR_Linux + 310) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 308 +#define __NR_Linux_syscalls 310 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 308 +#define __NR_O32_Linux_syscalls 310 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -598,7 +600,7 @@ #define __NR_mknodat (__NR_Linux + 249) #define __NR_fchownat (__NR_Linux + 250) #define __NR_futimesat (__NR_Linux + 251) -#define __NR_fstatat (__NR_Linux + 252) +#define __NR_newfstatat (__NR_Linux + 252) #define __NR_unlinkat (__NR_Linux + 253) #define __NR_renameat (__NR_Linux + 254) #define __NR_linkat (__NR_Linux + 255) @@ -614,16 +616,18 @@ #define __NR_tee (__NR_Linux + 265) #define __NR_vmsplice (__NR_Linux + 266) #define __NR_move_pages (__NR_Linux + 267) +#define __NR_set_robust_list (__NR_Linux + 268) +#define __NR_get_robust_list (__NR_Linux + 269) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 267 +#define __NR_Linux_syscalls 269 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 267 +#define __NR_64_Linux_syscalls 269 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -887,7 +891,7 @@ #define __NR_mknodat (__NR_Linux + 253) #define __NR_fchownat (__NR_Linux + 254) #define __NR_futimesat (__NR_Linux + 255) -#define __NR_fstatat (__NR_Linux + 256) +#define __NR_newfstatat (__NR_Linux + 256) #define __NR_unlinkat (__NR_Linux + 257) #define __NR_renameat (__NR_Linux + 258) #define __NR_linkat (__NR_Linux + 259) @@ -903,16 +907,18 @@ #define __NR_tee (__NR_Linux + 269) #define __NR_vmsplice (__NR_Linux + 270) #define __NR_move_pages (__NR_Linux + 271) +#define __NR_set_robust_list (__NR_Linux + 272) +#define __NR_get_robust_list (__NR_Linux + 273) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 271 +#define __NR_Linux_syscalls 273 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 271 +#define __NR_N32_Linux_syscalls 273 #ifdef __KERNEL__ diff --git a/include/asm-mips/user.h b/include/asm-mips/user.h index 89bf8b4cab3c..61f2a093b91b 100644 --- a/include/asm-mips/user.h +++ b/include/asm-mips/user.h @@ -8,6 +8,8 @@ #ifndef _ASM_USER_H #define _ASM_USER_H +#ifdef __KERNEL__ + #include <asm/page.h> #include <asm/reg.h> @@ -55,4 +57,6 @@ struct user { #define HOST_DATA_START_ADDR (u.start_data) #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) +#endif /* __KERNEL__ */ + #endif /* _ASM_USER_H */ diff --git a/include/asm-um/alternative-asm.i b/include/asm-um/alternative-asm.i new file mode 100644 index 000000000000..cae9faca132f --- /dev/null +++ b/include/asm-um/alternative-asm.i @@ -0,0 
+1,6 @@ +#ifndef __UM_ALTERNATIVE_ASM_I +#define __UM_ALTERNATIVE_ASM_I + +#include "asm/arch/alternative-asm.i" + +#endif diff --git a/include/asm-um/frame.i b/include/asm-um/frame.i new file mode 100644 index 000000000000..09d5dca5d928 --- /dev/null +++ b/include/asm-um/frame.i @@ -0,0 +1,6 @@ +#ifndef __UM_FRAME_I +#define __UM_FRAME_I + +#include "asm/arch/frame.i" + +#endif diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h index 2c95a319c056..ed59aa4c6ff9 100644 --- a/include/asm-x86_64/acpi.h +++ b/include/asm-x86_64/acpi.h @@ -155,8 +155,6 @@ extern void acpi_reserve_bootmem(void); #endif /*CONFIG_ACPI_SLEEP*/ -#define boot_cpu_physical_apicid boot_cpu_id - extern int acpi_disabled; extern int acpi_pci_disabled; diff --git a/include/asm-x86_64/alternative-asm.i b/include/asm-x86_64/alternative-asm.i new file mode 100644 index 000000000000..e4041f4fa4dc --- /dev/null +++ b/include/asm-x86_64/alternative-asm.i @@ -0,0 +1,14 @@ +#include <linux/config.h> + +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +1: lock + .section .smp_locks,"a" + .align 8 + .quad 1b + .previous + .endm +#else + .macro LOCK_PREFIX + .endm +#endif diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index 9c96a0a8d1bd..9e66d32330c9 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -17,6 +17,8 @@ extern int apic_verbosity; extern int apic_runs_main_timer; +extern int ioapic_force; +extern int apic_mapped; /* * Define the default level of output to be very little @@ -29,8 +31,6 @@ extern int apic_runs_main_timer; printk(s, ##a); \ } while (0) -#ifdef CONFIG_X86_LOCAL_APIC - struct pt_regs; /* @@ -95,17 +95,12 @@ extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, #define K8_APIC_EXT_INT_MSG_EXT 0x7 #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 -extern int disable_timer_pin_1; - - void smp_send_timer_broadcast_ipi(void); void switch_APIC_timer_to_ipi(void *cpumask); void switch_ipi_to_APIC_timer(void *cpumask); #define ARCH_APICTIMER_STOPS_ON_C3 1 -#endif /* CONFIG_X86_LOCAL_APIC */ - extern unsigned boot_cpu_id; #endif /* __ASM_APIC_H */ diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index f7ba57b1cc08..5b535eaf5309 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h @@ -399,6 +399,8 @@ static __inline__ int fls(int x) return r+1; } +#define ARCH_HAS_FAST_MULTIPLIER 1 + #include <asm-generic/bitops/hweight.h> #endif /* __KERNEL__ */ diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h index 4e3919524240..6b93f5a3a5c8 100644 --- a/include/asm-x86_64/calgary.h +++ b/include/asm-x86_64/calgary.h @@ -24,7 +24,6 @@ #ifndef _ASM_X86_64_CALGARY_H #define _ASM_X86_64_CALGARY_H -#include <linux/config.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/dma-mapping.h> @@ -34,12 +33,12 @@ struct iommu_table { unsigned long it_base; /* mapped address of tce table */ unsigned long it_hint; /* Hint for next alloc */ unsigned long *it_map; /* A simple allocation bitmap for now */ + void __iomem *bbar; /* Bridge BAR */ + u64 tar_val; /* Table Address Register */ + struct timer_list watchdog_timer; spinlock_t it_lock; /* Protects it_map */ unsigned int it_size; /* Size of iommu table in entries */ unsigned char it_busno; /* Bus number this table belongs to */ - void __iomem *bbar; - u64 tar_val; - struct timer_list watchdog_timer; }; #define TCE_TABLE_SIZE_UNSPECIFIED ~0 diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h index 0744db777676..eedc08526b0b 
100644 --- a/include/asm-x86_64/dwarf2.h +++ b/include/asm-x86_64/dwarf2.h @@ -13,7 +13,7 @@ away for older version. */ -#ifdef CONFIG_UNWIND_INFO +#ifdef CONFIG_AS_CFI #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc @@ -28,6 +28,11 @@ #define CFI_REMEMBER_STATE .cfi_remember_state #define CFI_RESTORE_STATE .cfi_restore_state #define CFI_UNDEFINED .cfi_undefined +#ifdef CONFIG_AS_CFI_SIGNAL_FRAME +#define CFI_SIGNAL_FRAME .cfi_signal_frame +#else +#define CFI_SIGNAL_FRAME +#endif #else @@ -45,6 +50,7 @@ #define CFI_REMEMBER_STATE # #define CFI_RESTORE_STATE # #define CFI_UNDEFINED # +#define CFI_SIGNAL_FRAME # #endif diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h index f65674832318..e15d3c8628f3 100644 --- a/include/asm-x86_64/e820.h +++ b/include/asm-x86_64/e820.h @@ -19,13 +19,9 @@ #define E820_RAM 1 #define E820_RESERVED 2 -#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ +#define E820_ACPI 3 #define E820_NVS 4 -#define HIGH_MEMORY (1024*1024) - -#define LOWMEMSIZE() (0x9f000) - #ifndef __ASSEMBLY__ struct e820entry { u64 addr; /* start of memory segment */ @@ -56,8 +52,7 @@ extern void e820_setup_gap(void); extern unsigned long e820_hole_size(unsigned long start_pfn, unsigned long end_pfn); -extern void __init parse_memopt(char *p, char **end); -extern void __init parse_memmapopt(char *p, char **end); +extern void finish_e820_parsing(void); extern struct e820map e820; diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h index 0b4ffbd1a125..1b620db5b9e3 100644 --- a/include/asm-x86_64/fixmap.h +++ b/include/asm-x86_64/fixmap.h @@ -37,13 +37,9 @@ enum fixed_addresses { VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, FIX_HPET_BASE, -#ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ -#endif -#ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, -#endif __end_of_fixed_addresses }; diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h index 50b38e7c58e4..81e714665344 100644 --- a/include/asm-x86_64/genapic.h +++ b/include/asm-x86_64/genapic.h @@ -16,7 +16,6 @@ struct genapic { char *name; u32 int_delivery_mode; u32 int_dest_mode; - u32 int_delivery_dest; /* for quick IPIs */ int (*apic_id_registered)(void); cpumask_t (*target_cpus)(void); void (*init_apic_ldr)(void); diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h index cba8a3b0cded..0217b74cc9fc 100644 --- a/include/asm-x86_64/i387.h +++ b/include/asm-x86_64/i387.h @@ -24,6 +24,7 @@ extern unsigned int mxcsr_feature_mask; extern void mxcsr_feature_mask_init(void); extern void init_fpu(struct task_struct *child); extern int save_i387(struct _fpstate __user *buf); +extern asmlinkage void math_state_restore(void); /* * FPU lazy state save handling... 
@@ -31,7 +32,9 @@ extern int save_i387(struct _fpstate __user *buf); #define unlazy_fpu(tsk) do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) \ - save_init_fpu(tsk); \ + save_init_fpu(tsk); \ + else \ + tsk->fpu_counter = 0; \ } while (0) /* Ignore delayed exceptions from user space */ @@ -134,8 +137,8 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) #else : [fx] "cdaSDb" (fx), "0" (0)); #endif - if (unlikely(err)) - __clear_user(fx, sizeof(struct i387_fxsave_struct)); + if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) + err = -EFAULT; /* No need to clear here because the caller clears USED_MATH */ return err; } diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h index 59c396431569..8633331420ec 100644 --- a/include/asm-x86_64/intel_arch_perfmon.h +++ b/include/asm-x86_64/intel_arch_perfmon.h @@ -14,6 +14,18 @@ #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ + (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) + +union cpuid10_eax { + struct { + unsigned int version_id:8; + unsigned int num_counters:8; + unsigned int bit_width:8; + unsigned int mask_length:8; + } split; + unsigned int full; +}; #endif /* X86_64_INTEL_ARCH_PERFMON_H */ diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h index fb7a0909a174..5d1b5c68e36e 100644 --- a/include/asm-x86_64/io_apic.h +++ b/include/asm-x86_64/io_apic.h @@ -10,8 +10,6 @@ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar */ -#ifdef CONFIG_X86_IO_APIC - #ifdef CONFIG_PCI_MSI static inline int use_pci_vector(void) {return 1;} static inline void disable_edge_ioapic_vector(unsigned int vector) { } @@ -209,10 +207,6 @@ extern int timer_uses_ioapic_pin_0; extern int sis_apic_bug; /* dummy */ -#else /* !CONFIG_X86_IO_APIC */ -#define io_apic_assign_pci_irqs 0 -#endif - extern int assign_irq_vector(int irq); void enable_NMI_through_LVT0 (void * dummy); diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h index 9db5a1b4f7b1..43469d8ab71a 100644 --- a/include/asm-x86_64/irq.h +++ b/include/asm-x86_64/irq.h @@ -44,9 +44,7 @@ static __inline__ int irq_canonicalize(int irq) return ((irq == 2) ? 
9 : irq); } -#ifdef CONFIG_X86_LOCAL_APIC #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ -#endif #ifdef CONFIG_HOTPLUG_CPU #include <linux/cpumask.h> diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h index c564bae03433..5fab957e1091 100644 --- a/include/asm-x86_64/kexec.h +++ b/include/asm-x86_64/kexec.h @@ -1,6 +1,27 @@ #ifndef _X86_64_KEXEC_H #define _X86_64_KEXEC_H +#define PA_CONTROL_PAGE 0 +#define VA_CONTROL_PAGE 1 +#define PA_PGD 2 +#define VA_PGD 3 +#define PA_PUD_0 4 +#define VA_PUD_0 5 +#define PA_PMD_0 6 +#define VA_PMD_0 7 +#define PA_PTE_0 8 +#define VA_PTE_0 9 +#define PA_PUD_1 10 +#define VA_PUD_1 11 +#define PA_PMD_1 12 +#define VA_PMD_1 13 +#define PA_PTE_1 14 +#define VA_PTE_1 15 +#define PA_TABLE_PAGE 16 +#define PAGES_NR 17 + +#ifndef __ASSEMBLY__ + #include <linux/string.h> #include <asm/page.h> @@ -64,4 +85,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs, newregs->rip = (unsigned long)current_text_addr(); } } + +NORET_TYPE void +relocate_kernel(unsigned long indirection_page, + unsigned long page_list, + unsigned long start_address) ATTRIB_NORET; + +#endif /* __ASSEMBLY__ */ + #endif /* _X86_64_KEXEC_H */ diff --git a/include/asm-x86_64/linkage.h b/include/asm-x86_64/linkage.h index 291c2d01c44f..b5f39d0189ce 100644 --- a/include/asm-x86_64/linkage.h +++ b/include/asm-x86_64/linkage.h @@ -1,6 +1,6 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H -/* Nothing to see here... */ +#define __ALIGN .p2align 4,,15 #endif diff --git a/include/asm-x86_64/mach_apic.h b/include/asm-x86_64/mach_apic.h index 0acea44c9377..d33422450c00 100644 --- a/include/asm-x86_64/mach_apic.h +++ b/include/asm-x86_64/mach_apic.h @@ -16,7 +16,6 @@ #define INT_DELIVERY_MODE (genapic->int_delivery_mode) #define INT_DEST_MODE (genapic->int_dest_mode) -#define INT_DELIVERY_DEST (genapic->int_delivery_dest) #define TARGET_CPUS (genapic->target_cpus()) #define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h index d13687dfd691..5a11146d6d9c 100644 --- a/include/asm-x86_64/mce.h +++ b/include/asm-x86_64/mce.h @@ -99,6 +99,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) } #endif +void mce_log_therm_throt_event(unsigned int cpu, __u64 status); + extern atomic_t mce_entry; #endif diff --git a/include/asm-x86_64/mmx.h b/include/asm-x86_64/mmx.h deleted file mode 100644 index 46b71da99869..000000000000 --- a/include/asm-x86_64/mmx.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ASM_MMX_H -#define _ASM_MMX_H - -/* - * MMX 3Dnow! helper operations - */ - -#include <linux/types.h> - -extern void *_mmx_memcpy(void *to, const void *from, size_t size); -extern void mmx_clear_page(void *page); -extern void mmx_copy_page(void *to, void *from); - -#endif diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h index 14fc3ddd9031..017fddb61dc5 100644 --- a/include/asm-x86_64/mpspec.h +++ b/include/asm-x86_64/mpspec.h @@ -159,13 +159,7 @@ struct mpc_config_lintsrc #define MAX_MP_BUSSES 256 /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. 
*/ #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) -enum mp_bustype { - MP_BUS_ISA = 1, - MP_BUS_EISA, - MP_BUS_PCI, - MP_BUS_MCA -}; -extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES]; +extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; @@ -178,18 +172,15 @@ extern int mp_irq_entries; extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; extern int mpc_default_type; extern unsigned long mp_lapic_addr; -extern int pic_mode; #ifdef CONFIG_ACPI extern void mp_register_lapic (u8 id, u8 enabled); extern void mp_register_lapic_address (u64 address); -#ifdef CONFIG_X86_IO_APIC extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs (void); extern int mp_register_gsi (u32 gsi, int triggering, int polarity); -#endif /*CONFIG_X86_IO_APIC*/ #endif extern int using_apic_timer; diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 10f8b51cec8b..37e194169fac 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h @@ -66,14 +66,25 @@ #define rdtscl(low) \ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") +#define rdtscp(low,high,aux) \ + asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux)) + #define rdtscll(val) do { \ unsigned int __a,__d; \ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ } while(0) +#define rdtscpll(val, aux) do { \ + unsigned long __a, __d; \ + asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \ + (val) = (__d << 32) | __a; \ +} while (0) + #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) +#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) + #define rdpmc(counter,low,high) \ __asm__ __volatile__("rdpmc" \ : "=a" (low), "=d" (high) \ diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h index 06fab6de2a88..16396b1de3e4 100644 --- a/include/asm-x86_64/mutex.h +++ b/include/asm-x86_64/mutex.h @@ -25,13 +25,9 @@ do { \ \ __asm__ __volatile__( \ LOCK_PREFIX " decl (%%rdi) \n" \ - " js 2f \n" \ - "1: \n" \ - \ - LOCK_SECTION_START("") \ - "2: call "#fail_fn" \n" \ - " jmp 1b \n" \ - LOCK_SECTION_END \ + " jns 1f \n" \ + " call "#fail_fn" \n" \ + "1:" \ \ :"=D" (dummy) \ : "D" (v) \ @@ -75,13 +71,9 @@ do { \ \ __asm__ __volatile__( \ LOCK_PREFIX " incl (%%rdi) \n" \ - " jle 2f \n" \ - "1: \n" \ - \ - LOCK_SECTION_START("") \ - "2: call "#fail_fn" \n" \ - " jmp 1b \n" \ - LOCK_SECTION_END \ + " jg 1f \n" \ + " call "#fail_fn" \n" \ + "1: " \ \ :"=D" (dummy) \ : "D" (v) \ diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index efb45c894d76..cbf2669bca71 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h @@ -7,24 +7,13 @@ #include <linux/pm.h> #include <asm/io.h> -struct pt_regs; - -typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); - -/** - * set_nmi_callback - * - * Set a handler for an NMI. Only one handler may be - * set. Return 1 if the NMI was handled. - */ -void set_nmi_callback(nmi_callback_t callback); - /** - * unset_nmi_callback + * do_nmi_callback * - * Remove the handler previously set. + * Check to see if a callback exists and execute it. Return 1 + * if the handler exists and was handled successfully. 
*/ -void unset_nmi_callback(void); +int do_nmi_callback(struct pt_regs *regs, int cpu); #ifdef CONFIG_PM @@ -48,25 +37,32 @@ static inline void unset_nmi_pm_callback(struct pm_dev * dev) #endif /* CONFIG_PM */ extern void default_do_nmi(struct pt_regs *); -extern void die_nmi(char *str, struct pt_regs *regs); +extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); #define get_nmi_reason() inb(0x61) extern int panic_on_timeout; extern int unknown_nmi_panic; +extern int nmi_watchdog_enabled; extern int check_nmi_watchdog(void); - -extern void setup_apic_nmi_watchdog (void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); +extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); +extern int avail_to_resrv_perfctr_nmi(unsigned int); +extern int reserve_perfctr_nmi(unsigned int); +extern void release_perfctr_nmi(unsigned int); +extern int reserve_evntsel_nmi(unsigned int); +extern void release_evntsel_nmi(unsigned int); + +extern void setup_apic_nmi_watchdog (void *); +extern void stop_apic_nmi_watchdog (void *); extern void disable_timer_nmi_watchdog(void); extern void enable_timer_nmi_watchdog(void); -extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); +extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); extern void nmi_watchdog_default(void); extern int setup_nmi_watchdog(char *); +extern atomic_t nmi_active; extern unsigned int nmi_watchdog; #define NMI_DEFAULT -1 #define NMI_NONE 0 diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h index 036b6ca5b53b..eba9cb471df3 100644 --- a/include/asm-x86_64/pci-direct.h +++ b/include/asm-x86_64/pci-direct.h @@ -2,47 +2,15 @@ #define ASM_PCI_DIRECT_H 1 #include <linux/types.h> -#include <asm/io.h> /* Direct PCI access. This is used for PCI accesses in early boot before the PCI subsystem works. */ -#define PDprintk(x...) +extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); +extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); +extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); +extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); -static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) -{ - u32 v; - outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); - v = inl(0xcfc); - if (v != 0xffffffff) - PDprintk("%x reading 4 from %x: %x\n", slot, offset, v); - return v; -} - -static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset) -{ - u8 v; - outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); - v = inb(0xcfc + (offset&3)); - PDprintk("%x reading 1 from %x: %x\n", slot, offset, v); - return v; -} - -static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset) -{ - u16 v; - outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); - v = inw(0xcfc + (offset&2)); - PDprintk("%x reading 2 from %x: %x\n", slot, offset, v); - return v; -} - -static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, - u32 val) -{ - PDprintk("%x writing to %x: %x\n", slot, offset, val); - outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); - outl(val, 0xcfc); -} +extern int early_pci_allowed(void); #endif diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h index b47c3df9ed1d..14996d962bac 100644 --- a/include/asm-x86_64/pda.h +++ b/include/asm-x86_64/pda.h @@ -9,20 +9,24 @@ /* Per processor datastructure. 
%gs points to it while the kernel runs */ struct x8664_pda { - struct task_struct *pcurrent; /* Current process */ - unsigned long data_offset; /* Per cpu data offset from linker address */ - unsigned long kernelstack; /* top of kernel stack for current */ - unsigned long oldrsp; /* user rsp for system call */ -#if DEBUG_STKSZ > EXCEPTION_STKSZ - unsigned long debugstack; /* #DB/#BP stack. */ + struct task_struct *pcurrent; /* 0 Current process */ + unsigned long data_offset; /* 8 Per cpu data offset from linker + address */ + unsigned long kernelstack; /* 16 top of kernel stack for current */ + unsigned long oldrsp; /* 24 user rsp for system call */ + int irqcount; /* 32 Irq nesting counter. Starts with -1 */ + int cpunumber; /* 36 Logical CPU number */ +#ifdef CONFIG_CC_STACKPROTECTOR + unsigned long stack_canary; /* 40 stack canary value */ + /* gcc-ABI: this canary MUST be at + offset 40!!! */ #endif - int irqcount; /* Irq nesting counter. Starts with -1 */ - int cpunumber; /* Logical CPU number */ - char *irqstackptr; /* top of irqstack */ + char *irqstackptr; int nodenumber; /* number of current node */ unsigned int __softirq_pending; unsigned int __nmi_count; /* number of NMI on this CPUs */ - int mmu_state; + short mmu_state; + short isidle; struct mm_struct *active_mm; unsigned apic_timer_irqs; } ____cacheline_aligned_in_smp; @@ -36,44 +40,69 @@ extern struct x8664_pda boot_cpu_pda[]; * There is no fast way to get the base address of the PDA, all the accesses * have to mention %fs/%gs. So it needs to be done this Torvaldian way. */ -#define sizeof_field(type,field) (sizeof(((type *)0)->field)) -#define typeof_field(type,field) typeof(((type *)0)->field) +extern void __bad_pda_field(void) __attribute__((noreturn)); -extern void __bad_pda_field(void); +/* + * proxy_pda doesn't actually exist, but tell gcc it is accessed for + * all PDA accesses so it gets read/write dependencies right. + */ +extern struct x8664_pda _proxy_pda; #define pda_offset(field) offsetof(struct x8664_pda, field) -#define pda_to_op(op,field,val) do { \ - typedef typeof_field(struct x8664_pda, field) T__; \ - switch (sizeof_field(struct x8664_pda, field)) { \ -case 2: \ -asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ -case 4: \ -asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ -case 8: \ -asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ - default: __bad_pda_field(); \ - } \ +#define pda_to_op(op,field,val) do { \ + typedef typeof(_proxy_pda.field) T__; \ + if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ + switch (sizeof(_proxy_pda.field)) { \ + case 2: \ + asm(op "w %1,%%gs:%c2" : \ + "+m" (_proxy_pda.field) : \ + "ri" ((T__)val), \ + "i"(pda_offset(field))); \ + break; \ + case 4: \ + asm(op "l %1,%%gs:%c2" : \ + "+m" (_proxy_pda.field) : \ + "ri" ((T__)val), \ + "i" (pda_offset(field))); \ + break; \ + case 8: \ + asm(op "q %1,%%gs:%c2": \ + "+m" (_proxy_pda.field) : \ + "ri" ((T__)val), \ + "i"(pda_offset(field))); \ + break; \ + default: \ + __bad_pda_field(); \ + } \ } while (0) -/* - * AK: PDA read accesses should be neither volatile nor have an memory clobber. - * Unfortunately removing them causes all hell to break lose currently. 
- */ -#define pda_from_op(op,field) ({ \ - typeof_field(struct x8664_pda, field) ret__; \ - switch (sizeof_field(struct x8664_pda, field)) { \ -case 2: \ -asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ -case 4: \ -asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ -case 8: \ -asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ - default: __bad_pda_field(); \ - } \ +#define pda_from_op(op,field) ({ \ + typeof(_proxy_pda.field) ret__; \ + switch (sizeof(_proxy_pda.field)) { \ + case 2: \ + asm(op "w %%gs:%c1,%0" : \ + "=r" (ret__) : \ + "i" (pda_offset(field)), \ + "m" (_proxy_pda.field)); \ + break; \ + case 4: \ + asm(op "l %%gs:%c1,%0": \ + "=r" (ret__): \ + "i" (pda_offset(field)), \ + "m" (_proxy_pda.field)); \ + break; \ + case 8: \ + asm(op "q %%gs:%c1,%0": \ + "=r" (ret__) : \ + "i" (pda_offset(field)), \ + "m" (_proxy_pda.field)); \ + break; \ + default: \ + __bad_pda_field(); \ + } \ ret__; }) - #define read_pda(field) pda_from_op("mov",field) #define write_pda(field,val) pda_to_op("mov",field,val) #define add_pda(field,val) pda_to_op("add",field,val) diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index bffb2f886a51..285756010c51 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -11,6 +11,16 @@ #include <asm/pda.h> +#ifdef CONFIG_MODULES +# define PERCPU_MODULE_RESERVE 8192 +#else +# define PERCPU_MODULE_RESERVE 0 +#endif + +#define PERCPU_ENOUGH_ROOM \ + (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ + PERCPU_MODULE_RESERVE) + #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) #define __my_cpu_offset() read_pda(data_offset) diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 51eba2395171..6899e770b173 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -21,12 +21,9 @@ extern unsigned long __supported_pte_mask; #define swapper_pg_dir init_level4_pgt -extern int nonx_setup(char *str); extern void paging_init(void); extern void clear_kernel_mapping(unsigned long addr, unsigned long size); -extern unsigned long pgkern_mask; - /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. 
@@ -265,7 +262,7 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; } static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } -static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } +static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } @@ -278,11 +275,12 @@ static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } -static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } +static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; } static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; } +static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; } struct vm_area_struct; diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 038fe1f47e6f..b73d0c76613c 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -51,10 +51,8 @@ extern unsigned long long monotonic_base; extern int sysctl_vsyscall; extern int nohpet; extern unsigned long vxtime_hz; +extern void time_init_gtod(void); -extern int numa_setup(char *opt); - -extern int setup_early_printk(char *); extern void early_printk(const char *fmt, ...) 
__attribute__((format(printf,1,2))); extern void early_identify_cpu(struct cpuinfo_x86 *c); @@ -91,7 +89,7 @@ extern void syscall32_cpu_init(void); extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); -extern void check_ioapic(void); +extern void early_quirks(void); extern void check_efer(void); extern int unhandled_signal(struct task_struct *tsk, int sig); @@ -103,13 +101,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c); extern unsigned long table_start, table_end; extern int exception_trace; -extern int using_apic_timer; -extern int disable_apic; extern unsigned cpu_khz; -extern int ioapic_force; -extern int skip_ioapic_setup; -extern int acpi_ht; -extern int acpi_disabled; extern void no_iommu_init(void); extern int force_iommu, no_iommu; @@ -131,7 +123,8 @@ extern int fix_aperture; extern int reboot_force; extern int notsc_setup(char *); -extern int setup_additional_cpus(char *); + +extern int gsi_irq_sharing(int gsi); extern void smp_local_timer_interrupt(struct pt_regs * regs); diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h index dea0e9459264..72aeebed920b 100644 --- a/include/asm-x86_64/rwlock.h +++ b/include/asm-x86_64/rwlock.h @@ -18,69 +18,9 @@ #ifndef _ASM_X86_64_RWLOCK_H #define _ASM_X86_64_RWLOCK_H -#include <linux/stringify.h> - #define RW_LOCK_BIAS 0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -#define __build_read_lock_ptr(rw, helper) \ - asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \ - "js 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tcall " helper "\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - ::"a" (rw) : "memory") - -#define __build_read_lock_const(rw, helper) \ - asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \ - "js 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tpushq %%rax\n\t" \ - "leaq %0,%%rax\n\t" \ - "call " helper "\n\t" \ - "popq %%rax\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - :"=m" (*((volatile int *)rw))::"memory") - -#define __build_read_lock(rw, helper) do { \ - if (__builtin_constant_p(rw)) \ - __build_read_lock_const(rw, helper); \ - else \ - __build_read_lock_ptr(rw, helper); \ - } while (0) - -#define __build_write_lock_ptr(rw, helper) \ - asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ - "jnz 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tcall " helper "\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - ::"a" (rw) : "memory") - -#define __build_write_lock_const(rw, helper) \ - asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ - "jnz 2f\n" \ - "1:\n" \ - LOCK_SECTION_START("") \ - "2:\tpushq %%rax\n\t" \ - "leaq %0,%%rax\n\t" \ - "call " helper "\n\t" \ - "popq %%rax\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END \ - :"=m" (*((volatile long *)rw))::"memory") +#define RW_LOCK_BIAS_STR "0x01000000" -#define __build_write_lock(rw, helper) do { \ - if (__builtin_constant_p(rw)) \ - __build_write_lock_const(rw, helper); \ - else \ - __build_write_lock_ptr(rw, helper); \ - } while (0) +/* Actual code is in asm/spinlock.h or in arch/x86_64/lib/rwlock.S */ #endif diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h index d4bed33fb32c..334ddcdd8f92 100644 --- a/include/asm-x86_64/segment.h +++ b/include/asm-x86_64/segment.h @@ -20,15 +20,16 @@ #define __USER_CS 0x33 /* 6*8+3 */ #define __USER32_DS __USER_DS -#define GDT_ENTRY_TLS 1 #define GDT_ENTRY_TSS 8 /* needs two entries */ #define GDT_ENTRY_LDT 10 /* needs two entries */ #define GDT_ENTRY_TLS_MIN 12 #define GDT_ENTRY_TLS_MAX 14 -/* 15 free */ #define GDT_ENTRY_TLS_ENTRIES 3 
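The rwlock.h hunk above drops the inline __build_read_lock/__build_write_lock helpers and keeps only RW_LOCK_BIAS; the real fast paths now live in asm/spinlock.h and arch/x86_64/lib/rwlock.S. Purely as an illustration of the bias scheme (not the kernel implementation), here is a portable C11 sketch in which readers subtract 1 and writers subtract the whole bias; demo_rwlock_t and the busy-wait retry loops are invented for the example.

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int lock; } demo_rwlock_t;

static void demo_read_lock(demo_rwlock_t *rw)
{
        for (;;) {
                if (atomic_fetch_sub(&rw->lock, 1) - 1 >= 0)
                        return;                         /* no writer held it */
                atomic_fetch_add(&rw->lock, 1);         /* undo, then wait */
                while (atomic_load(&rw->lock) <= 0)
                        ;                               /* writer active */
        }
}

static void demo_read_unlock(demo_rwlock_t *rw)
{
        atomic_fetch_add(&rw->lock, 1);
}

static void demo_write_lock(demo_rwlock_t *rw)
{
        for (;;) {
                if (atomic_fetch_sub(&rw->lock, RW_LOCK_BIAS) - RW_LOCK_BIAS == 0)
                        return;                         /* took the whole bias */
                atomic_fetch_add(&rw->lock, RW_LOCK_BIAS); /* undo, then wait */
                while (atomic_load(&rw->lock) != RW_LOCK_BIAS)
                        ;                               /* readers or writer */
        }
}

static void demo_write_unlock(demo_rwlock_t *rw)
{
        atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);
}

int main(void)
{
        demo_rwlock_t rw = { RW_LOCK_BIAS };

        demo_read_lock(&rw);
        demo_read_unlock(&rw);
        demo_write_lock(&rw);
        demo_write_unlock(&rw);
        return 0;
}

The sign bit of the counter is what the real x86 code tests with js/jns after the locked subtract; a negative value means a writer is in the way.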
+#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ +#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) + /* TLS indexes for 64bit - hardcoded in arch_prctl */ #define FS_TLS 0 #define GS_TLS 1 diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h index 064df08b9a0f..107bd90429e8 100644 --- a/include/asm-x86_64/semaphore.h +++ b/include/asm-x86_64/semaphore.h @@ -107,12 +107,9 @@ static inline void down(struct semaphore * sem) __asm__ __volatile__( "# atomic down operation\n\t" LOCK_PREFIX "decl %0\n\t" /* --sem->count */ - "js 2f\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tcall __down_failed\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "jns 1f\n\t" + "call __down_failed\n" + "1:" :"=m" (sem->count) :"D" (sem) :"memory"); @@ -130,14 +127,11 @@ static inline int down_interruptible(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" + "xorl %0,%0\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ - "js 2f\n\t" - "xorl %0,%0\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tcall __down_failed_interruptible\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "jns 2f\n\t" + "call __down_failed_interruptible\n" + "2:\n" :"=a" (result), "=m" (sem->count) :"D" (sem) :"memory"); @@ -154,14 +148,11 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" + "xorl %0,%0\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ - "js 2f\n\t" - "xorl %0,%0\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tcall __down_failed_trylock\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "jns 2f\n\t" + "call __down_failed_trylock\n\t" + "2:\n" :"=a" (result), "=m" (sem->count) :"D" (sem) :"memory","cc"); @@ -179,12 +170,9 @@ static inline void up(struct semaphore * sem) __asm__ __volatile__( "# atomic up operation\n\t" LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ - "jle 2f\n" - "1:\n" - LOCK_SECTION_START("") - "2:\tcall __up_wakeup\n\t" - "jmp 1b\n" - LOCK_SECTION_END + "jg 1f\n\t" + "call __up_wakeup\n" + "1:" :"=m" (sem->count) :"D" (sem) :"memory"); diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h index 3ede2a61973a..4581f978b299 100644 --- a/include/asm-x86_64/signal.h +++ b/include/asm-x86_64/signal.h @@ -24,10 +24,6 @@ typedef struct { } sigset_t; -struct pt_regs; -asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); - - #else /* Here we must cater to libcs that poke about in kernel headers. 
*/ diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index ce97f65e1d10..d6b7c057edba 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -4,27 +4,18 @@ /* * We need the APIC definitions automatically as part of 'smp.h' */ -#ifndef __ASSEMBLY__ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/bitops.h> extern int disable_apic; -#endif -#ifdef CONFIG_X86_LOCAL_APIC -#ifndef __ASSEMBLY__ #include <asm/fixmap.h> #include <asm/mpspec.h> -#ifdef CONFIG_X86_IO_APIC #include <asm/io_apic.h> -#endif #include <asm/apic.h> #include <asm/thread_info.h> -#endif -#endif #ifdef CONFIG_SMP -#ifndef ASSEMBLY #include <asm/pda.h> @@ -42,7 +33,6 @@ extern cpumask_t cpu_initialized; extern void smp_alloc_memory(void); extern volatile unsigned long smp_invalidate_needed; -extern int pic_mode; extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); extern int smp_num_siblings; @@ -74,20 +64,16 @@ static inline int hard_smp_processor_id(void) return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); } -extern int safe_smp_processor_id(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); extern unsigned num_processors; extern unsigned disabled_cpus; -#endif /* !ASSEMBLY */ - #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif -#ifndef ASSEMBLY /* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. @@ -109,11 +95,8 @@ static inline int cpu_present_to_apicid(int mps_cpu) return BAD_APICID; } -#endif /* !ASSEMBLY */ - #ifndef CONFIG_SMP #define stack_smp_processor_id() 0 -#define safe_smp_processor_id() 0 #define cpu_logical_map(x) (x) #else #include <asm/thread_info.h> @@ -125,19 +108,23 @@ static inline int cpu_present_to_apicid(int mps_cpu) }) #endif -#ifndef __ASSEMBLY__ static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } -#endif #ifdef CONFIG_SMP #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #else #define cpu_physical_id(cpu) boot_cpu_id -#endif - +static inline int smp_call_function_single(int cpuid, void (*func) (void *info), + void *info, int retry, int wait) +{ + /* Disable interrupts here? 
*/ + func(info); + return 0; +} +#endif /* !CONFIG_SMP */ #endif diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 248a79f0eaff..be7a9e629fb2 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -16,31 +16,23 @@ * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) \ - (*(volatile signed int *)(&(x)->slock) <= 0) - -#define __raw_spin_lock_string \ - "\n1:\t" \ - LOCK_PREFIX " ; decl %0\n\t" \ - "js 2f\n" \ - LOCK_SECTION_START("") \ - "2:\t" \ - "rep;nop\n\t" \ - "cmpl $0,%0\n\t" \ - "jle 2b\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END - -#define __raw_spin_lock_string_up \ - "\n\tdecl %0" - -#define __raw_spin_unlock_string \ - "movl $1,%0" \ - :"=m" (lock->slock) : : "memory" +static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +{ + return *(volatile signed int *)(&(lock)->slock) <= 0; +} static inline void __raw_spin_lock(raw_spinlock_t *lock) { - asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory"); + asm volatile( + "\n1:\t" + LOCK_PREFIX " ; decl %0\n\t" + "jns 2f\n" + "3:\n" + "rep;nop\n\t" + "cmpl $0,%0\n\t" + "jle 3b\n\t" + "jmp 1b\n" + "2:\t" : "=m" (lock->slock) : : "memory"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) @@ -49,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) { int oldval; - __asm__ __volatile__( + asm volatile( "xchgl %0,%1" :"=q" (oldval), "=m" (lock->slock) :"0" (0) : "memory"); @@ -59,13 +51,14 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - __raw_spin_unlock_string - ); + asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory"); } -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +{ + while (__raw_spin_is_locked(lock)) + cpu_relax(); +} /* * Read-write spinlocks, allowing multiple readers @@ -79,26 +72,34 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * * On x86, we implement read-write locks as a 32-bit counter * with the high bit (sign) being the "contended" bit. - * - * The inline assembly is non-obvious. Think about it. - * - * Changed to use the same technique as rw semaphores. See - * semaphore.h for details. 
-ben - * - * the helpers are in arch/i386/kernel/semaphore.c */ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +static inline int __raw_read_can_lock(raw_rwlock_t *lock) +{ + return (int)(lock)->lock > 0; +} + +static inline int __raw_write_can_lock(raw_rwlock_t *lock) +{ + return (lock)->lock == RW_LOCK_BIAS; +} static inline void __raw_read_lock(raw_rwlock_t *rw) { - __build_read_lock(rw, "__read_lock_failed"); + asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" + "jns 1f\n" + "call __read_lock_failed\n" + "1:\n" + ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { - __build_write_lock(rw, "__write_lock_failed"); + asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" + "jz 1f\n" + "\tcall __write_lock_failed\n\t" + "1:\n" + ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h new file mode 100644 index 000000000000..5eb9799bef76 --- /dev/null +++ b/include/asm-x86_64/stacktrace.h @@ -0,0 +1,18 @@ +#ifndef _ASM_STACKTRACE_H +#define _ASM_STACKTRACE_H 1 + +/* Generic stack tracer with callbacks */ + +struct stacktrace_ops { + void (*warning)(void *data, char *msg); + /* msg must contain %s for the symbol */ + void (*warning_symbol)(void *data, char *msg, unsigned long symbol); + void (*address)(void *data, unsigned long address); + /* On negative return stop dumping */ + int (*stack)(void *data, char *name); +}; + +void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, + struct stacktrace_ops *ops, void *data); + +#endif diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index 6bf170bceae1..bd376bc8c4ab 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -14,12 +14,13 @@ #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" /* frame pointer must be last for get_wchan */ -#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t" -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t" +#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" #define __EXTRA_CLOBBER \ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" +/* Save restore flags to clear handle leaking NT */ #define switch_to(prev,next,last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h index 53e9a68b3336..dbb047febc5e 100644 --- a/include/asm-x86_64/tce.h +++ b/include/asm-x86_64/tce.h @@ -24,7 +24,6 @@ #ifndef _ASM_X86_64_TCE_H #define _ASM_X86_64_TCE_H -extern void* tce_table_kva[]; extern unsigned int specified_table_size; struct iommu_table; diff --git a/include/asm-x86_64/therm_throt.h b/include/asm-x86_64/therm_throt.h new file mode 100644 index 000000000000..5aac059007ba --- /dev/null +++ b/include/asm-x86_64/therm_throt.h @@ -0,0 +1 @@ +#include <asm-i386/therm_throt.h> diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index 2029b00351f3..787a08114b48 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -114,11 +114,14 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_IRET 5 /* force IRET */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ +#define 
TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ /* 16 free */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ #define TIF_ABI_PENDING 19 #define TIF_MEMDIE 20 +#define TIF_DEBUG 21 /* uses debug registers */ +#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) @@ -128,9 +131,12 @@ static inline struct thread_info *stack_thread_info(void) #define _TIF_IRET (1<<TIF_IRET) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) +#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) #define _TIF_IA32 (1<<TIF_IA32) #define _TIF_FORK (1<<TIF_FORK) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) +#define _TIF_DEBUG (1<<TIF_DEBUG) +#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ @@ -138,6 +144,9 @@ static inline struct thread_info *stack_thread_info(void) /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) +/* flags to check in __switch_to() */ +#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP) + #define PREEMPT_ACTIVE 0x10000000 /* diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h index d16d5b60f419..983bd296c81a 100644 --- a/include/asm-x86_64/tlbflush.h +++ b/include/asm-x86_64/tlbflush.h @@ -4,44 +4,44 @@ #include <linux/mm.h> #include <asm/processor.h> -#define __flush_tlb() \ - do { \ - unsigned long tmpreg; \ - \ - __asm__ __volatile__( \ - "movq %%cr3, %0; # flush TLB \n" \ - "movq %0, %%cr3; \n" \ - : "=r" (tmpreg) \ - :: "memory"); \ - } while (0) +static inline unsigned long get_cr3(void) +{ + unsigned long cr3; + asm volatile("mov %%cr3,%0" : "=r" (cr3)); + return cr3; +} -/* - * Global pages have to be flushed a bit differently. Not a real - * performance problem because this does not happen often. 
- */ -#define __flush_tlb_global() \ - do { \ - unsigned long tmpreg, cr4, cr4_orig; \ - \ - __asm__ __volatile__( \ - "movq %%cr4, %2; # turn off PGE \n" \ - "movq %2, %1; \n" \ - "andq %3, %1; \n" \ - "movq %1, %%cr4; \n" \ - "movq %%cr3, %0; # flush TLB \n" \ - "movq %0, %%cr3; \n" \ - "movq %2, %%cr4; # turn PGE back on \n" \ - : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ - : "i" (~X86_CR4_PGE) \ - : "memory"); \ - } while (0) - -extern unsigned long pgkern_mask; - -#define __flush_tlb_all() __flush_tlb_global() +static inline void set_cr3(unsigned long cr3) +{ + asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory"); +} + +static inline void __flush_tlb(void) +{ + set_cr3(get_cr3()); +} + +static inline unsigned long get_cr4(void) +{ + unsigned long cr4; + asm volatile("mov %%cr4,%0" : "=r" (cr4)); + return cr4; +} + +static inline void set_cr4(unsigned long cr4) +{ + asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory"); +} + +static inline void __flush_tlb_all(void) +{ + unsigned long cr4 = get_cr4(); + set_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */ + set_cr4(cr4); /* write old PGE again and flush TLBs */ +} #define __flush_tlb_one(addr) \ - __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) + __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory") /* diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h index 1e1fa003daa3..e856570c0689 100644 --- a/include/asm-x86_64/uaccess.h +++ b/include/asm-x86_64/uaccess.h @@ -84,7 +84,7 @@ struct exception_table_entry */ #define __get_user_x(size,ret,x,ptr) \ - __asm__ __volatile__("call __get_user_" #size \ + asm volatile("call __get_user_" #size \ :"=a" (ret),"=d" (x) \ :"c" (ptr) \ :"r8") @@ -101,7 +101,7 @@ struct exception_table_entry case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \ default: __get_user_bad(); break; \ } \ - (x) = (__typeof__(*(ptr)))__val_gu; \ + (x) = (typeof(*(ptr)))__val_gu; \ __ret_gu; \ }) @@ -112,7 +112,7 @@ extern void __put_user_8(void); extern void __put_user_bad(void); #define __put_user_x(size,ret,x,ptr) \ - __asm__ __volatile__("call __put_user_" #size \ + asm volatile("call __put_user_" #size \ :"=a" (ret) \ :"c" (ptr),"d" (x) \ :"r8") @@ -139,7 +139,7 @@ extern void __put_user_bad(void); #define __put_user_check(x,ptr,size) \ ({ \ int __pu_err; \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + typeof(*(ptr)) __user *__pu_addr = (ptr); \ switch (size) { \ case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ @@ -173,7 +173,7 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. 
*/ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ - __asm__ __volatile__( \ + asm volatile( \ "1: mov"itype" %"rtype"1,%2\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ @@ -193,7 +193,7 @@ struct __large_struct { unsigned long buf[100]; }; int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (typeof(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -217,7 +217,7 @@ do { \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ - __asm__ __volatile__( \ + asm volatile( \ "1: mov"itype" %2,%"rtype"1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ @@ -237,15 +237,20 @@ do { \ */ /* Handles exceptions in both to and from, but doesn't do access_ok */ -extern unsigned long copy_user_generic(void *to, const void *from, unsigned len); - -extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len); -extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len); -extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); - -static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size) +__must_check unsigned long +copy_user_generic(void *to, const void *from, unsigned len); + +__must_check unsigned long +copy_to_user(void __user *to, const void *from, unsigned len); +__must_check unsigned long +copy_from_user(void *to, const void __user *from, unsigned len); +__must_check unsigned long +copy_in_user(void __user *to, const void __user *from, unsigned len); + +static __always_inline __must_check +int __copy_from_user(void *dst, const void __user *src, unsigned size) { - int ret = 0; + int ret = 0; if (!__builtin_constant_p(size)) return copy_user_generic(dst,(__force void *)src,size); switch (size) { @@ -272,9 +277,10 @@ static __always_inline int __copy_from_user(void *dst, const void __user *src, u } } -static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size) +static __always_inline __must_check +int __copy_to_user(void __user *dst, const void *src, unsigned size) { - int ret = 0; + int ret = 0; if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst,src,size); switch (size) { @@ -303,10 +309,10 @@ static __always_inline int __copy_to_user(void __user *dst, const void *src, uns } } - -static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size) +static __always_inline __must_check +int __copy_in_user(void __user *dst, const void __user *src, unsigned size) { - int ret = 0; + int ret = 0; if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst,(__force void *)src,size); switch (size) { @@ -344,15 +350,17 @@ static __always_inline int __copy_in_user(void __user *dst, const void __user *s } } -long strncpy_from_user(char *dst, const char __user *src, long count); -long __strncpy_from_user(char *dst, const char __user *src, long count); -long strnlen_user(const char __user *str, long n); -long __strnlen_user(const char __user *str, long n); -long strlen_user(const char __user *str); -unsigned long clear_user(void __user *mem, unsigned long len); -unsigned long __clear_user(void __user *mem, unsigned long len); - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +__must_check long +strncpy_from_user(char *dst, const char __user *src, long count); +__must_check long +__strncpy_from_user(char *dst, const char __user *src, long 
count); +__must_check long strnlen_user(const char __user *str, long n); +__must_check long __strnlen_user(const char __user *str, long n); +__must_check long strlen_user(const char __user *str); +__must_check unsigned long clear_user(void __user *mem, unsigned long len); +__must_check unsigned long __clear_user(void __user *mem, unsigned long len); + +__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size); +#define __copy_to_user_inatomic copy_user_generic #endif /* __X86_64_UACCESS_H */ diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 80fd48e84bbb..eeb98c168e98 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -600,9 +600,9 @@ __SYSCALL(__NR_fchmodat, sys_fchmodat) #define __NR_faccessat 269 __SYSCALL(__NR_faccessat, sys_faccessat) #define __NR_pselect6 270 -__SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */ +__SYSCALL(__NR_pselect6, sys_pselect6) #define __NR_ppoll 271 -__SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */ +__SYSCALL(__NR_ppoll, sys_ppoll) #define __NR_unshare 272 __SYSCALL(__NR_unshare, sys_unshare) #define __NR_set_robust_list 273 @@ -658,6 +658,7 @@ do { \ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_COMPAT_SYS_TIME diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h index 1f6e9bfb569e..2e7ff10fd775 100644 --- a/include/asm-x86_64/unwind.h +++ b/include/asm-x86_64/unwind.h @@ -18,6 +18,7 @@ struct unwind_frame_info { struct pt_regs regs; struct task_struct *task; + unsigned call_frame:1; }; #define UNW_PC(frame) (frame)->regs.rip @@ -57,6 +58,10 @@ struct unwind_frame_info PTREGS_INFO(r15), \ PTREGS_INFO(rip) +#define UNW_DEFAULT_RA(raItem, dataAlign) \ + ((raItem).where == Memory && \ + !((raItem).value * (dataAlign) + 8)) + static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, /*const*/ struct pt_regs *regs) { @@ -94,8 +99,8 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) #else -#define UNW_PC(frame) ((void)(frame), 0) -#define UNW_SP(frame) ((void)(frame), 0) +#define UNW_PC(frame) ((void)(frame), 0UL) +#define UNW_SP(frame) ((void)(frame), 0UL) static inline int arch_unw_user_mode(const void *info) { diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index 146b24402a5f..2281e9399b96 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h @@ -4,6 +4,7 @@ enum vsyscall_num { __NR_vgettimeofday, __NR_vtime, + __NR_vgetcpu, }; #define VSYSCALL_START (-10UL << 20) @@ -15,6 +16,7 @@ enum vsyscall_num { #include <linux/seqlock.h> #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16))) +#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16))) #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16))) @@ -26,6 +28,9 @@ enum vsyscall_num { #define VXTIME_HPET 2 #define VXTIME_PMTMR 3 +#define VGETCPU_RDTSCP 1 +#define VGETCPU_LSL 2 + struct vxtime_data { long hpet_address; /* HPET base address */ int last; @@ -40,6 +45,7 @@ struct vxtime_data { /* vsyscall space (readonly) */ extern struct vxtime_data __vxtime; +extern int 
__vgetcpu_mode; extern struct timespec __xtime; extern volatile unsigned long __jiffies; extern unsigned long __wall_jiffies; @@ -48,6 +54,7 @@ extern seqlock_t __xtime_lock; /* kernel space (writeable) */ extern struct vxtime_data vxtime; +extern int vgetcpu_mode; extern unsigned long wall_jiffies; extern struct timezone sys_tz; extern int sysctl_vsyscall; @@ -55,6 +62,8 @@ extern seqlock_t xtime_lock; extern int sysctl_vsyscall; +extern void vsyscall_set_cpu(int cpu); + #define ARCH_HAVE_XTIME_LOCK 1 #endif /* __KERNEL__ */ diff --git a/include/linux/aer.h b/include/linux/aer.h new file mode 100644 index 000000000000..402e178b38eb --- /dev/null +++ b/include/linux/aer.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2006 Intel Corp. + * Tom Long Nguyen (tom.l.nguyen@intel.com) + * Zhang Yanmin (yanmin.zhang@intel.com) + */ + +#ifndef _AER_H_ +#define _AER_H_ + +#if defined(CONFIG_PCIEAER) +/* pci-e port driver needs this function to enable aer */ +extern int pci_enable_pcie_error_reporting(struct pci_dev *dev); +extern int pci_find_aer_capability(struct pci_dev *dev); +extern int pci_disable_pcie_error_reporting(struct pci_dev *dev); +extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +#else +#define pci_enable_pcie_error_reporting(dev) do { } while (0) +#define pci_find_aer_capability(dev) do { } while (0) +#define pci_disable_pcie_error_reporting(dev) do { } while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do { } while (0) +#endif + +#endif //_AER_H_ + diff --git a/include/linux/edd.h b/include/linux/edd.h index 162512b886f7..b2b3e68aa512 100644 --- a/include/linux/edd.h +++ b/include/linux/edd.h @@ -52,6 +52,7 @@ #define EDD_CL_EQUALS 0x3d646465 /* "edd=" */ #define EDD_CL_OFF 0x666f /* "of" for off */ #define EDD_CL_SKIP 0x6b73 /* "sk" for skipmbr */ +#define EDD_CL_ON 0x6e6f /* "on" for on */ #ifndef __ASSEMBLY__ diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h new file mode 100644 index 000000000000..031ed3780e45 --- /dev/null +++ b/include/linux/getcpu.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_GETCPU_H +#define _LINUX_GETCPU_H 1 + +/* Cache for getcpu() to speed it up. Results might be upto a jiffie + out of date, but will be faster. + User programs should not refer to the contents of this structure. + It is only a cache for vgetcpu(). It might change in future kernels. + The user program must store this information per thread (__thread) + If you want 100% accurate information pass NULL instead. */ +struct getcpu_cache { + unsigned long t0; + unsigned long t1; + unsigned long res[4]; +}; + +#endif diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 329ebcffa106..c8d5f207c3d4 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -115,6 +115,21 @@ static inline u64 get_jiffies_64(void) ((long)(a) - (long)(b) >= 0)) #define time_before_eq(a,b) time_after_eq(b,a) +/* Same as above, but does so with platform independent 64bit types. + * These must be used when utilizing jiffies_64 (i.e. return value of + * get_jiffies_64() */ +#define time_after64(a,b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)(b) - (__s64)(a) < 0)) +#define time_before64(a,b) time_after64(b,a) + +#define time_after_eq64(a,b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)(a) - (__s64)(b) >= 0)) +#define time_before_eq64(a,b) time_after_eq64(b,a) + /* * Have the 32 bit jiffies value wrap 5 minutes after boot * so jiffies wrap bugs show up earlier. 
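The new time_after64()/time_before64() macros in jiffies.h compare 64-bit timestamps through a signed difference, the same trick the 32-bit variants use, so a counter wrap does not invert the ordering. A self-contained sketch of the idea follows, without the kernel's typecheck() guard; demo_time_after64 is a made-up name.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t  s64;

/* same shape as the new macro: order via a signed difference */
#define demo_time_after64(a, b)  ((s64)(b) - (s64)(a) < 0)

int main(void)
{
        u64 now      = UINT64_MAX - 5;  /* counter just before it wraps */
        u64 deadline = now + 10;        /* wraps around to a small value */

        /* a plain unsigned compare claims the deadline already passed */
        printf("deadline > now            : %d\n", deadline > now);                  /* 0 */
        /* the signed-difference form still orders the two correctly */
        printf("time_after64(deadline,now): %d\n", demo_time_after64(deadline, now)); /* 1 */
        return 0;
}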
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e44a37e2c71c..4fa373bb18ac 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -187,6 +187,7 @@ extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; extern int tainted; extern const char *print_tainted(void); extern void add_taint(unsigned); diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 932021f872d5..6c9873f88287 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -35,9 +35,13 @@ #endif #define KPROBE_ENTRY(name) \ - .section .kprobes.text, "ax"; \ + .pushsection .kprobes.text, "ax"; \ ENTRY(name) +#define KPROBE_END(name) \ + END(name); \ + .popsection + #ifndef END #define END(name) \ .size name, .-name diff --git a/include/linux/pci.h b/include/linux/pci.h index 3ec72551ac31..5c3a4176eb64 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -356,6 +356,8 @@ struct pci_driver { struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; + + int multithread_probe; }; #define to_pci_driver(drv) container_of(drv,struct pci_driver, driver) @@ -431,7 +433,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn); struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn); void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); unsigned int pci_scan_child_bus(struct pci_bus *bus); -void pci_bus_add_device(struct pci_dev *dev); +int __must_check pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); @@ -439,6 +441,7 @@ extern struct pci_dev *pci_dev_get(struct pci_dev *dev); extern void pci_dev_put(struct pci_dev *dev); extern void pci_remove_bus(struct pci_bus *b); extern void pci_remove_bus_device(struct pci_dev *dev); +extern void pci_stop_bus_device(struct pci_dev *dev); void pci_setup_cardbus(struct pci_bus *bus); /* Generic PCI functions exported to card drivers */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6a1e09834559..ab032ceafa84 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1411,6 +1411,7 @@ #define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 #define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 #define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 +#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 #define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 #define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 #define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 @@ -1482,9 +1483,6 @@ #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 #define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 -#define PCI_DEVICE_ID_MARVELL_GT96100 0x9652 -#define PCI_DEVICE_ID_MARVELL_GT96100A 0x9653 - #define PCI_VENDOR_ID_V3 0x11b0 #define PCI_DEVICE_ID_V3_V960 0x0001 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 96930cb5927c..7d0e26cba420 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -196,7 +196,7 @@ #define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ #define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */ #define PCI_CAP_ID_PCIX 0x07 /* PCI-X */ -#define PCI_CAP_ID_HT_IRQCONF 0x08 /* HyperTransport IRQ Configuration */ +#define PCI_CAP_ID_HT 0x08 /* HyperTransport */ 
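pci_bus_add_device() above now returns int and is tagged __must_check, so callers that ignore a failed add get a compile-time warning (the uaccess.h changes later in this diff apply the same annotation to the copy_*_user family). The effect can be reproduced in plain user-space C with gcc's warn_unused_result attribute; demo_bus_add_device below is purely illustrative.

#include <stdio.h>

/* stand-in for the kernel's __must_check annotation */
__attribute__((warn_unused_result))
static int demo_bus_add_device(int id)
{
        return (id < 0) ? -1 : 0;       /* pretend negative ids fail */
}

int main(void)
{
        /* demo_bus_add_device(3);  <- silently dropping the result now
         *                             triggers a warn_unused_result warning */
        if (demo_bus_add_device(3) != 0) {
                fprintf(stderr, "add_device failed\n");
                return 1;
        }
        return 0;
}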
#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */ #define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ #define PCI_CAP_ID_EXP 0x10 /* PCI Express */ diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index b44e01a70914..6cd91e3f9820 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -62,6 +62,12 @@ struct pcie_port_service_driver { int (*suspend) (struct pcie_device *dev, pm_message_t state); int (*resume) (struct pcie_device *dev); + /* Service Error Recovery Handler */ + struct pci_error_handlers *err_handler; + + /* Link Reset Capability - AER service driver specific */ + pci_ers_result_t (*reset_link) (struct pci_dev *dev); + const struct pcie_port_service_id *id_table; struct device_driver driver; }; diff --git a/include/linux/sched.h b/include/linux/sched.h index 34ed0d99b1bd..9d4aa7f95bc8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -819,6 +819,11 @@ struct task_struct { unsigned did_exec:1; pid_t pid; pid_t tgid; + +#ifdef CONFIG_CC_STACKPROTECTOR + /* Canary value for the -fstack-protector gcc feature */ + unsigned long stack_canary; +#endif /* * pointers to (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with @@ -865,6 +870,15 @@ struct task_struct { struct key *thread_keyring; /* keyring private to this thread */ unsigned char jit_keyring; /* default keyring to attach requested keys to */ #endif + /* + * fpu_counter contains the number of consecutive context switches + * that the FPU is used. If this is over a threshold, the lazy fpu + * saving becomes unlazy to save the trap. This is an unsigned char + * so that after 256 times the counter wraps and the behavior turns + * lazy again; this to deal with bursty apps that only use FPU for + * a short time + */ + unsigned char fpu_counter; int oomkilladj; /* OOM kill score adjustment (bit shift). 
*/ char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 9cc81e572224..50e2b01e517c 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -5,15 +5,16 @@ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; + int skip; /* input argument: How many entries to skip */ + int all_contexts; /* input argument: if true do than one stack */ }; extern void save_stack_trace(struct stack_trace *trace, - struct task_struct *task, int all_contexts, - unsigned int skip); + struct task_struct *task); extern void print_stack_trace(struct stack_trace *trace, int spaces); #else -# define save_stack_trace(trace, task, all, skip) do { } while (0) +# define save_stack_trace(trace, task) do { } while (0) # define print_stack_trace(trace) do { } while (0) #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 008f04c56737..3f0f716225ec 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -53,6 +53,7 @@ struct mq_attr; struct compat_stat; struct compat_timeval; struct robust_list_head; +struct getcpu_cache; #include <linux/types.h> #include <linux/aio_abi.h> @@ -596,5 +597,6 @@ asmlinkage long sys_get_robust_list(int pid, size_t __user *len_ptr); asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); +asmlinkage long sys_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *cache); #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index eca555781d05..1b24bd45e080 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -150,6 +150,8 @@ enum KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ KERN_COMPAT_LOG=73, /* int: print compat layer messages */ KERN_MAX_LOCK_DEPTH=74, + KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ }; diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index 46919f9f5eb3..4d0909e53595 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -24,5 +24,5 @@ #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ - MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC \ - "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__) + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC + |
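The sys_getcpu() declaration added to syscalls.h, together with the vgetcpu vsyscall plumbing earlier in this diff, gives user space a cheap way to ask which CPU and node it is running on. A minimal check through the raw system call follows, assuming the libc headers expose SYS_getcpu; the optional struct getcpu_cache third argument is passed as NULL here to request accurate, uncached values.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned cpu = 0, node = 0;

        /* third argument is the optional struct getcpu_cache *;
         * NULL asks for accurate, uncached values */
        if (syscall(SYS_getcpu, &cpu, &node, NULL) != 0) {
                perror("getcpu");
                return 1;
        }
        printf("running on cpu %u, node %u\n", cpu, node);
        return 0;
}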