Diffstat (limited to 'arch')
177 files changed, 1610 insertions, 893 deletions
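A recurring pattern in the alpha hunks below (floppy.h, irq_alpha.c, sys_titan.c) is dropping IRQF_DISABLED from request_irq() callers. By this point the flag was a no-op -- genirq runs all handlers with local interrupts disabled regardless -- so passing 0 is equivalent. A minimal sketch of the before/after (driver name and handler are hypothetical):

#include <linux/interrupt.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
        /* Handlers already run with local IRQs disabled. */
        return IRQ_HANDLED;
}

static int demo_setup(unsigned int irq)
{
        /* Was: request_irq(irq, demo_interrupt, IRQF_DISABLED, "demo", NULL);
         * IRQF_DISABLED is a no-op, so 0 is the equivalent flags value. */
        return request_irq(irq, demo_interrupt, 0, "demo", NULL);
}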
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe751aa1..2cc3cc519c54 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -12,7 +12,7 @@ NM := $(NM) -B LDFLAGS_vmlinux := -static -N #-relax CHECKFLAGS += -D__alpha__ -m64 -cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data +cflags-y := -pipe -mno-fp-regs -ffixed-8 cflags-y += $(call cc-option, -fno-jump-tables) cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd50e73..bae97eb19d26 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - IRQF_DISABLED, "floppy", NULL) + 0, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #ifdef CONFIG_PCI diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872accd2215..7b2be251c30f 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,13 +117,6 @@ handle_irq(int irq) return; } - /* - * From here we must proceed with IPL_MAX. Note that we do not - * explicitly enable interrupts afterwards - some MILO PALcode - * (namely LX164 one) seems to have severe problems with RTI - * at IPL 0. - */ - local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb71a8..f433fc11877a 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; + + /* + * Disable interrupts during IRQ handling. + * Note that there is no matching local_irq_enable() due to + * severe problems with RTI at IPL0 and some MILO PALcode + * (namely LX164). 
+ */ + local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP @@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector, { long cpu; - local_irq_disable(); smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { @@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "timer", }; diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046f708d..1383f8601a93 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -209,6 +213,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a481b8c5..a53cf03f49d5 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void) * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. */ - titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL); - titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL); - titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL); - titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL); - titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL); /* @@ -348,9 +348,9 @@ privateer_init_pci(void) * Hook a couple of extra err interrupts that the * common titan code won't. 
*/ - titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL); - titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL); /* diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index ccd84806b62f..eac071668201 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void) " flag.nz %0 \n" : "=r"(temp), "=r"(flags) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) - : "cc"); + : "memory", "cc"); return flags; } @@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags) __asm__ __volatile__( " flag %0 \n" : - : "r"(flags)); + : "r"(flags) + : "memory"); } /* @@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void) " and %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) - : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); + : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) + : "memory"); } /* @@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void) __asm__ __volatile__( " lr %0, [status32] \n" - : "=&r"(temp)); + : "=&r"(temp) + : + : "memory"); return temp; } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 13b739469c51..1cacda426a0e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1183,9 +1183,9 @@ config ARM_NR_BANKS default 8 config IWMMXT - bool "Enable iWMMXt support" + bool "Enable iWMMXt support" if !CPU_PJ4 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 - default y if PXA27x || PXA3xx || ARCH_MMP + default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 help Enable support for iWMMXt context switching at run time if running on a CPU that supports it. @@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420 to deadlock. This workaround puts DSB before executing ISB if an abort may occur on cache maintenance. +config ARM_ERRATA_798181 + bool "ARM errata: TLBI/DSB failure on Cortex-A15" + depends on CPU_V7 && SMP + help + On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not + adequately shooting down all use of the old entries. This + option enables the Linux kernel workaround for this erratum + which sends an IPI to the CPUs that are running the same ASID + as the one being invalidated. 
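The ARM_ERRATA_798181 help text above corresponds to the erratum_a15_798181() MIDR check added in arch/arm/kernel/smp_tlb.c further down. A standalone userspace sketch of that r0p0..r3p2 window test (example MIDR values assumed):

#include <stdint.h>
#include <stdio.h>

/* Affected window for Cortex-A15 erratum 798181: r0p0..r3p2.
 * Mask 0xff0ffff0 keeps implementer/architecture/part and drops the
 * variant [23:20] and revision [3:0] fields. */
static int midr_is_affected_a15(uint32_t midr)
{
        if ((midr & 0xff0ffff0) != 0x410fc0f0)   /* ARM Cortex-A15? */
                return 0;
        return midr <= 0x413fc0f2;               /* at most r3p2 */
}

int main(void)
{
        printf("r2p1: %d\n", midr_is_affected_a15(0x412fc0f1)); /* 1 */
        printf("r4p0: %d\n", midr_is_affected_a15(0x414fc0f0)); /* 0 */
        return 0;
}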
+ endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts index dd0c57dd9f30..3234875824dc 100644 --- a/arch/arm/boot/dts/armada-370-mirabox.dts +++ b/arch/arm/boot/dts/armada-370-mirabox.dts @@ -54,7 +54,7 @@ }; mvsdio@d00d4000 { - pinctrl-0 = <&sdio_pins2>; + pinctrl-0 = <&sdio_pins3>; pinctrl-names = "default"; status = "okay"; /* diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 8188d138020e..a195debb67d3 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -59,6 +59,12 @@ "mpp50", "mpp51", "mpp52"; marvell,function = "sd0"; }; + + sdio_pins3: sdio-pins3 { + marvell,pins = "mpp48", "mpp49", "mpp50", + "mpp51", "mpp52", "mpp53"; + marvell,function = "sd0"; + }; }; gpio0: gpio@d0018100 { diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 9de93096601a..aaa63d0a8096 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi @@ -191,8 +191,8 @@ prcmu: prcmu@80157000 { compatible = "stericsson,db8500-prcmu"; - reg = <0x80157000 0x1000>; - reg-names = "prcmu"; + reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; + reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm"; interrupts = <0 47 0x4>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index 6ce3d17c3a29..fd36e1cca104 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -152,7 +152,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; sgtl5000: codec@0a { diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index e6cde8aa7fff..6c6a5442800a 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -70,7 +70,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; rtc: rtc@51 { diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460b4581..281a223591ff 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -91,6 +91,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x00a00600 0x20>; interrupts = <1 13 0xf01>; + clocks = <&clks 15>; }; L2: l2-cache@00a02000 { diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index bd83b8fc7c83..c3573be7b92c 100644 --- a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -77,6 +77,7 @@ }; nand@3000000 { + chip-delay = <40>; status = "okay"; partition@0 { diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 93c3afbef9ee..3694e94f6e99 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts @@ -96,11 +96,11 @@ marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { - marvell,pins = "mpp44"; + marvell,pins = "mpp46"; marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { - marvell,pins = "mpp45"; + marvell,pins = "mpp47"; marvell,function = "gpio"; }; @@ -157,14 +157,14 @@ gpios = <&gpio0 16 0>; linux,default-trigger = "default-on"; }; - health_led1 { + rebuild_led { + label = "status:white:rebuild_led"; + gpios = <&gpio1 4 0>; + }; + health_led { label = 
"status:red:health_led"; gpios = <&gpio1 5 0>; }; - health_led2 { - label = "status:white:health_led"; - gpios = <&gpio1 4 0>; - }; backup_led { label = "status:blue:backup_led"; gpios = <&gpio0 15 0>; diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 8aad00f81ed9..f7bec3b1ba32 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -13,6 +13,9 @@ compatible = "marvell,orion5x"; interrupt-parent = <&intc>; + aliases { + gpio0 = &gpio0; + }; intc: interrupt-controller { compatible = "marvell,orion-intc", "marvell,intc"; interrupt-controller; @@ -32,7 +35,9 @@ #gpio-cells = <2>; gpio-controller; reg = <0x10100 0x40>; - ngpio = <32>; + ngpios = <32>; + interrupt-controller; + #interrupt-cells = <2>; interrupts = <6>, <7>, <8>, <9>; }; @@ -91,7 +96,7 @@ reg = <0x90000 0x10000>, <0xf2200000 0x800>; reg-names = "regs", "sram"; - interrupts = <22>; + interrupts = <28>; status = "okay"; }; }; diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799fd3a81..dff714d886d5 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15704ed..ea289e1435e7 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@ #undef _CACHE #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - #if defined(CONFIG_CPU_CACHE_V4) # ifdef _CACHE # define MULTI_CACHE 1 diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fbe2477..ed94b1a366ae 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828f484d..91b99abe7a95 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. 
+ */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a6611323c..a7b85e0d0cc1 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); + #else /* !CONFIG_CPU_HAS_ASID */ #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd1b64c..86b8fe398b95 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@ #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0d..ab865e65a84c 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@ #include <asm/glue.h> -#define TLB_V3_PAGE (1 << 0) #define TLB_V4_U_PAGE (1 << 1) #define TLB_V4_D_PAGE (1 << 2) #define TLB_V4_I_PAGE (1 << 3) @@ -22,7 +21,6 @@ #define TLB_V6_D_PAGE (1 << 5) #define TLB_V6_I_PAGE (1 << 6) -#define TLB_V3_FULL (1 << 8) #define TLB_V4_U_FULL (1 << 9) #define TLB_V4_D_FULL (1 << 10) #define TLB_V4_I_FULL (1 << 11) @@ -52,7 +50,6 @@ * ============= * * We have the following to choose from: - * v3 - ARMv3 * v4 - ARMv4 without write buffer * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); @@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); @@ -418,7 +412,6 @@ static inline void 
local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); @@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void) isb(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. + */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3248cde504ed..fefd7f971437 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old) */ .macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site. Note that we are + * unable to unwind through a function which does not otherwise save its lr. + */ + UNWIND(.pad #4) stmdb sp!, {r0-r3, lr} + UNWIND(.save {r0-r3, lr}) .endm .macro mcount_get_lr reg @@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old) .endm ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE mov ip, lr ldmia sp!, {lr} @@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc) #else __mcount #endif +UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) +UNWIND(.fnstart) __ftrace_caller +UNWIND(.fnend) ENDPROC(ftrace_caller) #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) +UNWIND(.fnstart) __ftrace_graph_caller +UNWIND(.fnend) ENDPROC(ftrace_graph_caller) #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1cae77..8bac553fe213 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -267,7 +267,7 @@ __create_page_tables: addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] -#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) +#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) sub r4, r4, #4 @ Fixup page table pointer @ for 64-bit descriptors #endif diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab90..1fd749ee4a1b 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused) } if (err) { - pr_warning("CPU %d debug is powered down!\n", cpu); + pr_warn_once("CPU %d debug is powered down!\n", cpu); cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return; } @@ -987,7 +987,7 @@ clear_vcr: isb(); if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to disable vector catch\n", cpu); + pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return; } @@ -1007,7 +1007,7 @@ clear_vcr: } if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to clear debug register pairs\n", cpu); + pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); return; } @@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { +static struct notifier_block dbg_cpu_pm_nb = { .notifier_call = dbg_cpu_pm_notify, }; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 146157dfe27c..8c3094d0f7b7 100644 --- a/arch/arm/kernel/perf_event.c +++ 
b/arch/arm/kernel/perf_event.c @@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events, struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index bd6f56b9ec21..59d2adb764a9 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void) static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) { return (cyc * mult) >> shift; } -static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) { u64 epoch_ns; u32 epoch_cyc; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2e3eda..234e339196c0 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -56,7 +56,6 @@ #include <asm/virt.h> #include "atags.h" -#include "tcm.h" #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) @@ -353,6 +352,23 @@ void __init early_print(const char *str, ...) printk("%s", buf); } +static void __init cpuid_init_hwcaps(void) +{ + unsigned int divide_instrs; + + if (cpu_architecture() < CPU_ARCH_ARMv7) + return; + + divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; + + switch (divide_instrs) { + case 2: + elf_hwcap |= HWCAP_IDIVA; + case 1: + elf_hwcap |= HWCAP_IDIVT; + } +} + static void __init feat_v6_fixup(void) { int id = read_cpuid_id(); @@ -483,8 +499,11 @@ static void __init setup_processor(void) snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; + + cpuid_init_hwcaps(); + #ifndef CONFIG_ARM_THUMB - elf_hwcap &= ~HWCAP_THUMB; + elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif feat_v6_fixup(); @@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); -#ifndef CONFIG_LPAE +#ifndef CONFIG_ARM_LPAE if (bank->start + size < bank->start) { printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " "32-bit physical address space\n", (long long)start); @@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p) reserve_crashkernel(); - tcm_init(); - #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 79078edbb9bc..1f2ccccaf009 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; - if (arm_delay_ops.const_clock) - return NOTIFY_OK; - if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index bd0300531399..e82e1d248772 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -12,6 +12,7 @@ #include <asm/smp_plat.h> #include <asm/tlbflush.h> +#include <asm/mmu_context.h> 
/**********************************************************************/ @@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored) local_flush_bp_all(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} +#else +static int erratum_a15_798181(void) +{ + return 0; +} +#endif + +static void ipi_flush_tlb_a15_erratum(void *arg) +{ + dmb(); +} + +static void broadcast_tlb_a15_erratum(void) +{ + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, + NULL, 1); +} + +static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) +{ + int cpu; + cpumask_t mask = { CPU_BITS_NONE }; + + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + /* + * We only need to send an IPI if the other CPUs are running + * the same ASID as the one being invalidated. There is no + * need for locking around the active_asids check since the + * switch_mm() function has at least one dmb() (as required by + * this workaround) in case a context switch happens on + * another CPU after the condition below. + */ + if (atomic64_read(&mm->context.id) == + atomic64_read(&per_cpu(active_asids, cpu))) + cpumask_set_cpu(cpu, &mask); + } + smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); +} + void flush_tlb_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_tlb_all, NULL, 1); else local_flush_tlb_all(); + broadcast_tlb_a15_erratum(); } void flush_tlb_mm(struct mm_struct *mm) @@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm) on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); else local_flush_tlb_mm(mm); + broadcast_tlb_mm_a15_erratum(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) &ta, 1); } else local_flush_tlb_page(vma, uaddr); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } else local_flush_tlb_kernel_page(kaddr); + broadcast_tlb_a15_erratum(); } void flush_tlb_range(struct vm_area_struct *vma, @@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma, &ta, 1); } else local_flush_tlb_range(vma, start, end); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } else local_flush_tlb_kernel_range(start, end); + broadcast_tlb_a15_erratum(); } void flush_bp_all(void) diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 30ae6bb4a310..f50f19e5c138 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -17,7 +17,6 @@ #include <asm/mach/map.h> #include <asm/memory.h> #include <asm/system_info.h> -#include "tcm.h" static struct gen_pool *tcm_pool; static bool dtcm_present; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 5a936988eb24..c1fe498983ac 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext) break; case 
KVM_CAP_ARM_SET_DEVICE_ADDR: r = 1; + break; case KVM_CAP_NR_VCPUS: r = num_online_cpus(); break; diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4ea9a982269c..7bed7556077a 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, u32 val; int cpu; - cpu = get_cpu(); - if (!p->is_write) return read_from_write_only(vcpu, p); + cpu = get_cpu(); + cpumask_setall(&vcpu->arch.require_dcache_flush); cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..0e4cfe123b38 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) lr, irq, vgic_cpu->vgic_lr[lr]); BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; - - goto out; + return true; } /* Try to use another LR for this interrupt */ @@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vgic_cpu->vgic_irq_lr_map[irq] = lr; set_bit(lr, vgic_cpu->lr_used); -out: if (!vgic_irq_is_edge(vcpu, irq)) vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); - /* - * We do not need to take the distributor lock here, since the only - * action we perform is clearing the irq_active_bit for an EOIed - * level interrupt. There is a potential race with - * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we - * check if the interrupt is already active. Two possibilities: - * - * - The queuing is occurring on the same vcpu: cannot happen, - * as we're already in the context of this vcpu, and - * executing the handler - * - The interrupt has been migrated to another vcpu, and we - * ignore this interrupt for this run. Big deal. It is still - * pending though, and will get considered when this vcpu - * exits. - */ if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { /* * Some level interrupts have been EOIed. Clear their @@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } else { vgic_cpu_irq_clear(vcpu, irq); } + + /* + * Despite being EOIed, the LR may not have + * been marked as empty. + */ + set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); + vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; } } @@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } /* - * Sync back the VGIC state after a guest run. We do not really touch - * the distributor here (the irq_pending_on_cpu bit is safe to set), - * so there is no need for taking its lock. + * Sync back the VGIC state after a guest run. The distributor lock is + * needed so we don't get preempted in the middle of the state processing. 
*/ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { @@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + if (!irqchip_in_kernel(vcpu->kvm)) return; + spin_lock(&dist->lock); __kvm_vgic_sync_hwstate(vcpu); + spin_unlock(&dist->lock); } int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c7..64dbfa57204a 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) static void __timer_const_udelay(unsigned long xloops) { unsigned long long loops = xloops; - loops *= loops_per_jiffy; + loops *= arm_delay_ops.ticks_per_jiffy; __timer_delay(loops >> UDELAY_SHIFT); } @@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) pr_info("Switching to timer-based delay loop\n"); delay_timer = timer; lpj_fine = timer->freq / HZ; - loops_per_jiffy = lpj_fine; + + /* cpufreq may scale loops_per_jiffy, so keep a private copy */ + arm_delay_ops.ticks_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; - arm_delay_ops.const_clock = true; + delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26cc0cb..52e4bb5cf12d 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -22,19 +22,9 @@ static struct map_desc cns3xxx_io_desc[] __initdata = { { - .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), - .length = SZ_4K, + .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), + .length = SZ_8K, .type = MT_DEVICE, }, { .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e57f289..b1021aafa481 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h @@ -94,10 +94,10 @@ #define RTC_INTR_STS_OFFSET 0x34 #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ -#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ +#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ -#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 +#define CNS3XXX_PM_BASE_VIRT 0xFB001000 #define PM_CLK_GATE_OFFSET 0x00 #define PM_SOFT_RST_OFFSET 0x04 @@ -109,7 +109,7 @@ #define PM_PLL_HM_PD_OFFSET 0x1C #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ -#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 +#define CNS3XXX_UART0_BASE_VIRT 0xFB002000 #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 @@ -130,7 +130,7 @@ #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ -#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 +#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 #define TIMER1_COUNTER_OFFSET 0x00 #define 
TIMER1_AUTO_RELOAD_OFFSET 0x04 @@ -227,16 +227,16 @@ * Testchip peripheral and fpga gic regions */ #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ -#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 +#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ -#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 +#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 -#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 +#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ -#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 +#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4dd82ab..b5cc77d2380b 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h @@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr) static inline void putc(int c) { - /* Transmit fifo not full? */ - while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) - ; + int i; + + for (i = 0; i < 10000; i++) { + /* Transmit fifo not full? */ + if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) + break; + } __raw_writeb(c, PHYS_UART_DATA); } diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index f30c52843396..890cae23c12a 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c @@ -28,13 +28,11 @@ extern void secondary_startup(void); */ void __ref highbank_cpu_die(unsigned int cpu) { - flush_cache_all(); - highbank_set_cpu_jump(cpu, phys_to_virt(0)); - highbank_set_core_pwr(); - cpu_do_idle(); + flush_cache_louis(); + highbank_set_core_pwr(); - /* We should never return from idle */ - panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); + while (1) + cpu_do_idle(); } diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index e13a8fa5e62c..2193c834f55c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); + clk_register_clkdev(clk[admux_gate], "audmux", NULL); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); @@ -265,6 +266,7 @@ int __init mx35_clocks_init(void) clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); clk_prepare_enable(clk[max_gate]); + clk_prepare_enable(clk[iomuxc_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. 
The clock code diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 2f9ff93a4e61..d38e54f5b6d7 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; +static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; @@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); - clk_register_clkdev(clk[twd], NULL, "smp_twd"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bfcec5b..5bf4a97ab241 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *); extern void imx_enable_cpu(int cpu, bool enable); extern void imx_set_cpu_jump(int cpu, void *jump_addr); +extern u32 imx_get_cpu_arg(int cpu); +extern void imx_set_cpu_arg(int cpu, u32 arg); extern void v7_cpu_resume(void); extern u32 *pl310_get_save_ptr(void); #ifdef CONFIG_SMP diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda2..361a253e2b63 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void) void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); + /* + * We use the cpu jumping argument register to sync with + * imx_cpu_kill() which is running on cpu0 and waiting for + * the register being cleared to kill the cpu. 
+ */ + imx_set_cpu_arg(cpu, ~0); cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + + while (imx_get_cpu_arg(cpu) == 0) + if (time_after(jiffies, timeout)) + return 0; imx_enable_cpu(cpu, false); + imx_set_cpu_arg(cpu, 0); return 1; } diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f1555c59b..09a742f8c7ab 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) src_base + SRC_GPR1 + cpu * 8); } +u32 imx_get_cpu_arg(int cpu) +{ + cpu = cpu_logical_map(cpu); + return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); +} + +void imx_set_cpu_arg(int cpu, u32 arg) +{ + cpu = cpu_logical_map(cpu); + writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); +} + void imx_src_prepare_restart(void) { u32 val; diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b2637b0e..e5f70415905a 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c @@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = { .duplex = DUPLEX_FULL, }; +static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(11), +}; + void __init iomega_ix2_200_init(void) { /* * Basic setup. Needs to be called early. */ - kirkwood_ge01_init(&iomega_ix2_200_ge00_data); + kirkwood_ge00_init(&iomega_ix2_200_ge00_data); + kirkwood_ge01_init(&iomega_ix2_200_ge01_data); } diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736cbbf8..08dd739aa709 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c @@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = { static struct mvsdio_platform_data guruplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ + .gpio_card_detect = -1, + .gpio_write_protect = -1, }; static struct gpio_led guruplug_led_pins[] = { diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69fdc937..6a6eb548307d 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c @@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = { static struct mvsdio_platform_data openrd_mvsdio_data = { .gpio_card_detect = 29, /* MPP29 used as SD card detect */ + .gpio_write_protect = -1, }; static unsigned int openrd_mpp_config[] __initdata = { diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b48926b..d24223166e06 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = { static struct mvsdio_platform_data rd88f6281_mvsdio_data = { .gpio_card_detect = 28, + .gpio_write_protect = -1, }; static unsigned int rd88f6281_mpp_config[] __initdata = { diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027f02fa..f9fd77e8f1f5 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles, { u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); - writel_relaxed(0, event_base + TIMER_CLEAR); + ctrl &= ~TIMER_ENABLE_EN; + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + + writel_relaxed(ctrl, 
event_base + TIMER_CLEAR); writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); return 0; diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58271de..d5970f5a1e8d 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c @@ -44,6 +44,8 @@ #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) + #define ACTIVE_DOORBELLS (8) static DEFINE_RAW_SPINLOCK(irq_controller_lock); @@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain; */ static void armada_370_xp_irq_mask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#endif } static void armada_370_xp_irq_unmask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#endif } #ifdef CONFIG_SMP @@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { armada_370_xp_irq_mask(irq_get_irq_data(virq)); - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hw, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + else + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); irq_set_status_flags(virq, IRQ_LEVEL); - if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { irq_set_percpu_devid(virq); irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, handle_percpu_devid_irq); diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae2e3fc..6c4f766365a2 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c @@ -543,15 +543,6 @@ static struct clk usb_dc_ck = { /* Direct from ULPD, no parent */ .rate = 48000000, .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), - .enable_bit = USB_REQ_EN_SHIFT, -}; - -static struct clk usb_dc_ck7xx = { - .name = "usb_dc_ck", - .ops = &clkops_generic, - /* Direct from ULPD, no parent */ - .rate = 48000000, - .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, }; @@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = { CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), + CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), CLK(NULL, "mclk", &mclk_16xx, CK_16XX), CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f335f173..0c6834ae1fc4 100644 --- 
a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -52,6 +52,13 @@ */ #define OMAP4_DPLL_ABE_DEFFREQ 98304000 +/* + * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section + * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred + * locked frequency for the USB DPLL is 960MHz. + */ +#define OMAP4_DPLL_USB_DEFFREQ 960000000 + /* Root clocks */ DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); @@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); +DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, + OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, + OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); + DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); @@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), + CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X), CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), @@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void) if (rc) pr_err("%s: failed to configure ABE DPLL!\n", __func__); + /* + * Lock USB DPLL on OMAP4 devices so that the L3INIT power + * domain can transition to retention state when not in use. + */ + rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); + if (rc) + pr_err("%s: failed to configure USB DPLL!\n", __func__); + return 0; } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 40f4a03d728f..d6ba13e1c540 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -293,5 +293,8 @@ extern void omap_reserve(void); struct omap_hwmod; extern int omap_dss_reset(struct omap_hwmod *); +/* SoC specific clock initializer */ +extern int (*omap_clk_init)(void); + #endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd65387b..5c445ca1e271 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -55,6 +55,12 @@ #include "prm44xx.h" /* + * omap_clk_init: points to a function that does the SoC-specific + * clock initializations + */ +int (*omap_clk_init)(void); + +/* * The machine specific code may provide the extra mapping besides the * default mapping provided here. 
*/ @@ -397,7 +403,7 @@ void __init omap2420_init_early(void) omap242x_clockdomains_init(); omap2420_hwmod_init(); omap_hwmod_init_postsetup(); - omap2420_clk_init(); + omap_clk_init = omap2420_clk_init; } void __init omap2420_init_late(void) @@ -427,7 +433,7 @@ void __init omap2430_init_early(void) omap243x_clockdomains_init(); omap2430_hwmod_init(); omap_hwmod_init_postsetup(); - omap2430_clk_init(); + omap_clk_init = omap2430_clk_init; } void __init omap2430_init_late(void) @@ -462,7 +468,7 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3430_init_early(void) @@ -500,7 +506,7 @@ void __init ti81xx_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3_init_late(void) @@ -568,7 +574,7 @@ void __init am33xx_init_early(void) am33xx_clockdomains_init(); am33xx_hwmod_init(); omap_hwmod_init_postsetup(); - am33xx_clk_init(); + omap_clk_init = am33xx_clk_init; } #endif @@ -593,7 +599,7 @@ void __init omap4430_init_early(void) omap44xx_clockdomains_init(); omap44xx_hwmod_init(); omap_hwmod_init_postsetup(); - omap4xxx_clk_init(); + omap_clk_init = omap4xxx_clk_init; } void __init omap4430_init_late(void) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c08c2b..a202a4785104 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if (oh->flags & HWMOD_FORCE_MSTANDBY) { + idlemode = HWMOD_IDLEMODE_FORCE; + } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { idlemode = HWMOD_IDLEMODE_NO; } else { if (sf & SYSC_HAS_ENAWAKEUP) @@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || + (oh->flags & HWMOD_FORCE_MSTANDBY)) { idlemode = HWMOD_IDLEMODE_FORCE; } else { if (sf & SYSC_HAS_ENAWAKEUP) diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b608eda..d5dc935f6060 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm { * * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out * of idle, rather than relying on module smart-idle - * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out - * of standby, rather than relying on module smart-standby + * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and + * out of standby, rather than relying on module smart-standby * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for * SDRAM controller, etc. XXX probably belongs outside the main hwmod file * XXX Should be HWMOD_SETUP_NO_RESET @@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm { * correctly, or this is being abused to deal with some PM latency * issues -- but we're currently suffering from a shortage of * folks who are able to track these issues down properly. + * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device + * is kept in force-standby mode. Failing to do so causes PM problems + * with musb on OMAP3630 at least. 
Note that musb has a dedicated register + * to control MSTANDBY signal when MIDLEMODE is set to force-standby. */ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_16BIT_REG (1 << 8) #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) #define HWMOD_BLOCK_WFI (1 << 10) +#define HWMOD_FORCE_MSTANDBY (1 << 11) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03ec952f..5112d04e7b79 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { * Erratum ID: i479 idle_req / idle_ack mechanism potentially * broken when autoidle is enabled * workaround is to disable the autoidle bit at module level. + * + * Enabling the device in any other MIDLEMODE setting but force-idle + * causes core_pwrdm not enter idle states at least on OMAP3630. + * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY + * signal when MIDLEMODE is set to force-idle. */ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE - | HWMOD_SWSUP_MSTANDBY, + | HWMOD_FORCE_MSTANDBY, }; /* usb_otg_hs */ diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e1687c..eaba9dc91a0d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", .class = &omap44xx_ocp2scp_hwmod_class, .clkdm_name = "l3_init_clkdm", - .main_clk = "func_48m_fclk", + /* + * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP + * block as an "optional clock," and normally should never be + * specified as the main_clk for an OMAP IP block. However it + * turns out that this clock is actually the main clock for + * the ocp2scp_usb_phy IP block: + * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html + * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems + * to be the best workaround. 
+ */ + .main_clk = "ocp2scp_usb_phy_phy_48m", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf17a8f..f62b509ed08d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void) clksrc_nr, clksrc_src) \ void __init omap##name##_gptimer_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ @@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \ clksrc_nr, clksrc_src) \ void __init omap##name##_sync32k_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ /* Enable the use of clocksource="gp_timer" kernel parameter */ \ diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d469e8..1e73f5fa8659 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h @@ -188,10 +188,8 @@ #if defined(CONFIG_CPU_S3C2416) #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) -#elif defined(CONFIG_CPU_S3C2443) -#define NR_IRQS (IRQ_S3C2443_AC97+1) #else -#define NR_IRQS (IRQ_S3C2440_AC97+1) +#define NR_IRQS (IRQ_S3C2443_AC97 + 1) #endif /* compatibility define. */ diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e011e73..d8ba9bee4c7e 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c @@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np, base = (void *)0xfd000000; intc->reg_mask = base + 0xa4; - intc->reg_pending = base + 0x08; + intc->reg_pending = base + 0xa8; irq_num = 20; irq_start = S3C2410_IRQ(32); irq_offset = 4; diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c27102..7f2cb6c5e2c1 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { #endif struct mmci_platform_data mop500_sdi0_data = { - .ios_handler = mop500_sdi0_ios_handler, .ocr_mask = MMC_VDD_29_30, .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b03457881c4b..87d2d7b38ce9 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/clk.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/platform_data/i2c-nomadik.h> @@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev) regulator_put(prox_regulator); } +void mop500_snowball_ethernet_clock_enable(void) +{ + struct clk *clk; + + clk = clk_get_sys("fsmc", NULL); + if (!IS_ERR(clk)) + clk_prepare_enable(clk); +} + static struct cryp_platform_data u8500_cryp1_platform_data = { .mem_to_engine = { .dir = STEDMA40_MEM_TO_PERIPH, @@ -683,6 +693,8 @@ static void __init snowball_init_machine(void) mop500_audio_init(parent); mop500_uart_init(parent); + mop500_snowball_ethernet_clock_enable(); + /* This board has full regulator constraints */ regulator_has_full_constraints(); } diff --git a/arch/arm/mach-ux500/board-mop500.h 
b/arch/arm/mach-ux500/board-mop500.h index eaa605f5d90d..d38951be70df 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void); void __init snowball_pinmaps_init(void); void __init hrefv60_pinmaps_init(void); void mop500_audio_init(struct device *parent); +void mop500_snowball_ethernet_clock_enable(void); int __init mop500_uib_init(void); void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf7bbe3..f1a581844372 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -312,9 +312,10 @@ static void __init u8500_init_machine(void) /* Pinmaps must be in place before devices register */ if (of_machine_is_compatible("st-ericsson,mop500")) mop500_pinmaps_init(); - else if (of_machine_is_compatible("calaosystems,snowball-a9500")) + else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { snowball_pinmaps_init(); - else if (of_machine_is_compatible("st-ericsson,hrefv60+")) + mop500_snowball_ethernet_clock_enable(); + } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) hrefv60_pinmaps_init(); else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} /* TODO: Add pinmaps for ccu9540 board. */ diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 025d17328730..4045c4931a30 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -43,7 +43,7 @@ config CPU_ARM740T depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_CACHE_V3 # although the core is v4t + select CPU_CACHE_V4 select CPU_CP15_MPU select CPU_PABRT_LEGACY help @@ -469,9 +469,6 @@ config CPU_PABRT_V7 bool # The cache model -config CPU_CACHE_V3 - bool - config CPU_CACHE_V4 bool diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 4e333fa2756f..9e51be96f635 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o -obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d59122cc3..48bc3c0a87ce 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; + outer_cache.inv_all = l2_inv_all; enable_l2(); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308a..c465faca51b0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) int lockregs; int i; - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: lockregs = 8; break; @@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) if (cache_id_part_number_from_dt) cache_id = cache_id_part_number_from_dt; else - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) - & L2X0_CACHE_ID_PART_MASK; + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; /* Determine the number of ways 
*/ - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: if (aux & (1 << 16)) ways = 16; @@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { .flush_all = l2x0_flush_all, .inv_all = l2x0_inv_all, .disable = l2x0_disable, - .set_debug = pl310_set_debug, }, }; @@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) data->save(); of_init = true; - l2x0_init(l2x0_base, aux_val, aux_mask); - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); + l2x0_init(l2x0_base, aux_val, aux_mask); return 0; } diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S deleted file mode 100644 index 8a3fadece8d3..000000000000 --- a/arch/arm/mm/cache-v3.S +++ /dev/null @@ -1,137 +0,0 @@ -/* - * linux/arch/arm/mm/cache-v3.S - * - * Copyright (C) 1997-2002 Russell king - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/linkage.h> -#include <linux/init.h> -#include <asm/page.h> -#include "proc-macros.S" - -/* - * flush_icache_all() - * - * Unconditionally clean and invalidate the entire icache. - */ -ENTRY(v3_flush_icache_all) - mov pc, lr -ENDPROC(v3_flush_icache_all) - -/* - * flush_user_cache_all() - * - * Invalidate all cache entries in a particular address - * space. - * - * - mm - mm_struct describing address space - */ -ENTRY(v3_flush_user_cache_all) - /* FALLTHROUGH */ -/* - * flush_kern_cache_all() - * - * Clean and invalidate the entire cache. - */ -ENTRY(v3_flush_kern_cache_all) - /* FALLTHROUGH */ - -/* - * flush_user_cache_range(start, end, flags) - * - * Invalidate a range of cache entries in the specified - * address space. - * - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) - * - flags - vma_area_struct flags describing address space - */ -ENTRY(v3_flush_user_cache_range) - mov ip, #0 - mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * coherent_kern_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_kern_range) - /* FALLTHROUGH */ - -/* - * coherent_user_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_user_range) - mov r0, #0 - mov pc, lr - -/* - * flush_kern_dcache_area(void *page, size_t size) - * - * Ensure no D cache aliasing occurs, either with itself or - * the I cache - * - * - addr - kernel address - * - size - region size - */ -ENTRY(v3_flush_kern_dcache_area) - /* FALLTHROUGH */ - -/* - * dma_flush_range(start, end) - * - * Clean and invalidate the specified virtual address range. 
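[Editor's note: the cache-v3.S removal in progress here is safe because, as the listing shows, nearly every v3 routine is a bare "mov pc, lr" (ARMv3 has no cache maintenance worth driving), and the one conditional flush above, "mcreq" with no preceding flag-setting instruction, was a latent bug; the cache-v4.S hunk below fixes the same "mcreq" slip in the v4 copy. The only remaining user, ARM740T, is really a v4T core, so the Kconfig hunk earlier moves it to CPU_CACHE_V4 and proc-arm740.S (further below) is repointed from v3_cache_fns to v4_cache_fns. A minimal C sketch of the dispatch table this relies on; the member list is abridged from struct cpu_cache_fns in <asm/cacheflush.h> and the instance is illustrative only:

struct cpu_cache_fns_sketch {
	void (*flush_kern_all)(void);	/* clean + invalidate everything */
	void (*dma_flush_range)(const void *start, const void *end);
	/* ... remaining operations elided ... */
};

static void v3_noop(void) { }	/* what most v3 entries amounted to */

static const struct cpu_cache_fns_sketch v3_like = {
	.flush_kern_all = v3_noop,
	/* dma_flush_range was the one entry issuing a real MCR */
};

Because multi-CPU kernels only ever call through this table (define_cache_functions fills it per CPU), repointing ARM740T at the v4 implementation is contained to this one structure. end of note]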
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_flush_range) - mov r0, #0 - mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * dma_unmap_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_unmap_area) - teq r2, #DMA_TO_DEVICE - bne v3_dma_flush_range - /* FALLTHROUGH */ - -/* - * dma_map_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_map_area) - mov pc, lr -ENDPROC(v3_dma_unmap_area) -ENDPROC(v3_dma_map_area) - - .globl v3_flush_kern_cache_louis - .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all - - __INITDATA - - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) - define_cache_functions v3 diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77be677..a7ba68f59f0c 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all) ENTRY(v4_flush_user_cache_range) #ifdef CONFIG_CPU_CP15 mov ip, #0 - mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache + mcr p15, 0, ip, c7, c7, 0 @ flush ID cache mov pc, lr #else /* FALLTHROUGH */ diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42ba..2ac37372ef52 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -static DEFINE_PER_CPU(atomic64_t, active_asids); +DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; @@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { local_flush_bp_all(); local_flush_tlb_all(); + dummy_flush_tlb_a15_erratum(); } atomic64_set(&per_cpu(active_asids, cpu), asid); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78f..a84ff763ac39 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -34,6 +34,7 @@ #include <asm/mach/pci.h> #include "mm.h" +#include "tcm.h" /* * empty_zero_page is a special page that is used for @@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init alloc_init_section(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys, - const struct mem_type *type) +static void __init map_init_section(pmd_t *pmd, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) { - pmd_t *pmd = pmd_offset(pud, addr); - +#ifndef CONFIG_ARM_LPAE /* - * Try a section mapping - end, addr and phys must all be aligned - * to a section boundary. Note that PMDs refer to the individual - * L1 entries, whereas PGDs refer to a group of L1 entries making - * up one logical pointer to an L2 table. + * In classic MMU format, puds and pmds are folded in to + * the pgds. pmd_offset gives the PGD entry. PGDs refer to a + * group of L1 entries making up one logical pointer to + * an L2 table (2MB), whereas PMDs refer to the individual + * L1 entries (1MB). Hence increment to get the correct + * offset for odd 1MB sections. 
+ * (See arch/arm/include/asm/pgtable-2level.h) */ - if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { - pmd_t *p = pmd; - -#ifndef CONFIG_ARM_LPAE - if (addr & SECTION_SIZE) - pmd++; + if (addr & SECTION_SIZE) + pmd++; #endif + do { + *pmd = __pmd(phys | type->prot_sect); + phys += SECTION_SIZE; + } while (pmd++, addr += SECTION_SIZE, addr != end); - do { - *pmd = __pmd(phys | type->prot_sect); - phys += SECTION_SIZE; - } while (pmd++, addr += SECTION_SIZE, addr != end); + flush_pmd_entry(pmd); +} - flush_pmd_entry(p); - } else { +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { /* - * No need to loop; pte's aren't interested in the - * individual L1 entries. + * With LPAE, we must loop over to map + * all the pmds for the given range. */ - alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); - } + next = pmd_addr_end(addr, end); + + /* + * Try a section mapping - addr, next and phys must all be + * aligned to a section boundary. + */ + if (type->prot_sect && + ((addr | next | phys) & ~SECTION_MASK) == 0) { + map_init_section(pmd, addr, next, phys, type); + } else { + alloc_init_pte(pmd, addr, next, + __phys_to_pfn(phys), type); + } + + phys += next - addr; + + } while (pmd++, addr = next, addr != end); } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, @@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, do { next = pud_addr_end(addr, end); - alloc_init_section(pud, addr, next, phys, type); + alloc_init_pmd(pud, addr, next, phys, type); phys += next - addr; } while (pud++, addr = next, addr != end); } @@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc) dma_contiguous_remap(); devicemaps_init(mdesc); kmap_init(); + tcm_init(); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d53f20..fde2d2a794cf 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -77,24 +77,27 @@ __arm740_setup: mcr p15, 0, r0, c6, c0 @ set area 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit mcr p15, 0, r0, c6, c1 @ set area 1, RAM ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) + cmp r3, #0 + moveq r0, #0 + beq 2f + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH +2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH mov r0, #0x06 mcr p15, 0, r0, c2, c0 @ Region 1&2 
cacheable @@ -137,13 +140,14 @@ __arm740_proc_info: .long 0x41807400 .long 0xfffffff0 .long 0 + .long 0 b __arm740_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT .long cpu_arm740_name .long arm740_processor_functions .long 0 .long 0 - .long v3_cache_fns @ cache model + .long v4_cache_fns @ cache model .size __arm740_proc_info, . - __arm740_proc_info diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b9421ab5e..2556cf1c2da1 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7e2972..344c8a548cc0 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc751d6..0b60dd3d742a 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext) .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da11fd84..d92dfd081429 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 3e6210b4d6d4..054b491ff764 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -17,7 +17,9 @@ #ifndef MULTI_CPU EXPORT_SYMBOL(cpu_dcache_clean_area); +#ifdef CONFIG_MMU EXPORT_SYMBOL(cpu_set_pte_ext); +#endif #else EXPORT_SYMBOL(processor); #endif diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bcaaa8de9325..5c07ee4fe3eb 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5c..f584d3f5b37c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -420,7 +420,7 @@ __v7_pj4b_proc_info: __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 - __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . 
- __v7_ca7mp_proc_info /* @@ -430,10 +430,25 @@ __v7_ca7mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* + * Qualcomm Inc. Krait processors. + */ + .type __krait_proc_info, #object +__krait_proc_info: + .long 0x510f0400 @ Required ID value + .long 0xff0ffc00 @ Mask for ID + /* + * Some Krait processors don't indicate support for SDIV and UDIV + * instructions in the ARM instruction set, even though they actually + * do support them. + */ + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV + .size __krait_proc_info, . - __krait_proc_info + + /* * Match any ARMv7 processor core. */ .type __v7_proc_info, #object diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d6487f35..e8efd83b6f25 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 25510361aa18..e766f889bfd6 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h index 8015ad434a40..8015ad434a40 100644 --- a/arch/arm/kernel/tcm.h +++ b/arch/arm/mm/tcm.h diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index cf60d0a9f176..fc6483f83ccc 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32) #define readw_be __raw_readw #define readl_be __raw_readl +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel + #define writeb_be __raw_writeb #define writew_be __raw_writew #define writel_be __raw_writel diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09e18c3..2c71d5634ec2 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h @@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) /* set interrupt enabled status */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); + asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); } /* unconditionally enable interrupts */ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9a02f71c6b1f..da18c8e05217 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -110,6 +110,7 @@ config DMI config EFI bool + select UCS2_STRING default y config SCHED_OMIT_FRAME_POINTER diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60a..79521d5499f9 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={ #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. 
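[Editor's note: the ia64 palinfo rework around this point replaces the deleted bookkeeping array with remove_proc_subtree(), which removes a /proc directory together with everything beneath it, so per-entry tracking becomes unnecessary; the deleted comment resuming just below makes the same point about procfs removal being otherwise non-recursive. A stand-alone sketch of the resulting create/remove pairing, with error handling abridged and helper names hypothetical:

#include <linux/kernel.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *pal_dir;	/* /proc/pal, made once at init */

static void pal_add_cpu_dir(unsigned int cpu)
{
	char name[3 + 4 + 1];		/* "cpu" + up to 4 digits + NUL */

	sprintf(name, "cpu%u", cpu);
	proc_mkdir(name, pal_dir);	/* per-CPU files get created under it */
}

static void pal_remove_cpu_dir(unsigned int cpu)
{
	char name[3 + 4 + 1];

	sprintf(name, "cpu%u", cpu);
	remove_proc_subtree(name, pal_dir);	/* children removed recursively */
}

The old code had to remember every proc_dir_entry it created precisely because remove_proc_entry() is not recursive; remove_proc_subtree() is the API that lifts that burden. end of note]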
The procfs code - * does not do recursion of deletion - * - * Notes: - * - +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; static struct proc_dir_entry *palinfo_dir; /* @@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi static void __cpuinit create_palinfo_proc_entries(unsigned int cpu) { -# define CPUSTR "cpu%d" - pal_func_cpu_u_t f; - struct proc_dir_entry **pdir; struct proc_dir_entry *cpu_dir; int j; - char cpustr[sizeof(CPUSTR)]; - - - /* - * we keep track of created entries in a depth-first order for - * cleanup purposes. Each entry is stored into palinfo_proc_entries - */ - sprintf(cpustr,CPUSTR, cpu); + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); + if (!cpu_dir) + return; f.req_cpu = cpu; - /* - * Compute the location to store per cpu entries - * We dont store the top level entry in this list, but - * remove it finally after removing all cpu entries. - */ - pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; - *pdir++ = cpu_dir; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - *pdir = create_proc_read_entry( - palinfo_entries[j].name, 0, cpu_dir, - palinfo_read_entry, (void *)f.value); - pdir++; + create_proc_read_entry( + palinfo_entries[j].name, 0, cpu_dir, + palinfo_read_entry, (void *)f.value); } } static void remove_palinfo_proc_entries(unsigned int hcpu) { - int j; - struct proc_dir_entry *cpu_dir, **pdir; - - pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; - cpu_dir = *pdir; - *pdir++=NULL; - for (j=0; j < (NR_PALINFO_ENTRIES); j++) { - if ((*pdir)) { - remove_proc_entry ((*pdir)->name, cpu_dir); - *pdir ++= NULL; - } - } - - if (cpu_dir) { - remove_proc_entry(cpu_dir->name, palinfo_dir); - } + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", hcpu); + remove_proc_subtree(cpustr, palinfo_dir); } static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1058,6 +1019,8 @@ palinfo_init(void) printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); + if (!palinfo_dir) + return -ENOMEM; /* Create palinfo dirs in /proc for all online cpus */ for_each_online_cpu(i) { @@ -1073,22 +1036,8 @@ palinfo_init(void) static void __exit palinfo_exit(void) { - int i = 0; - - /* remove all nodes: depth first pass. Could optimize this */ - for_each_online_cpu(i) { - remove_palinfo_proc_entries(i); - } - - /* - * Remove the top level entry finally - */ - remove_proc_entry(palinfo_dir->name, NULL); - - /* - * Unregister from cpu notifier callbacks - */ unregister_hotcpu_notifier(&palinfo_cpu_notifier); + remove_proc_subtree("pal", NULL); } module_init(palinfo_init); diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 4395ffc51fdb..8cc83431805b 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h @@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio) return gpio < MCFGPIO_PIN_MAX ? 
0 : __gpio_cansleep(gpio); } +static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) +{ + int err; + + err = gpio_request(gpio, label); + if (err) + return err; + + if (flags & GPIOF_DIR_IN) + err = gpio_direction_input(gpio); + else + err = gpio_direction_output(gpio, + (flags & GPIOF_INIT_HIGH) ? 1 : 0); + + if (err) + gpio_free(gpio); + + return err; +} + #endif diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21ff562a..51244bf97271 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,7 +18,7 @@ config MIPS select HAVE_KRETPROBES select HAVE_DEBUG_KMEMLEAK select ARCH_BINFMT_ELF_RANDOMIZE_PIE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT select RTC_LIB if !MACH_LOONGSON select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE @@ -657,7 +657,7 @@ config SNI_RM bool "SNI RM200/300/400" select FW_ARC if CPU_LITTLE_ENDIAN select FW_ARC32 if CPU_LITTLE_ENDIAN - select SNIPROM if CPU_BIG_ENDIAN + select FW_SNIPROM if CPU_BIG_ENDIAN select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 select CEVT_R4K @@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION config FW_ARC32 bool -config SNIPROM +config FW_SNIPROM bool config BOOT_ELF32 @@ -1493,7 +1493,6 @@ config CPU_XLP select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM - select CPU_HAS_LLSC select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_HAS_PREFETCH diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c29508..9aa7d44898ed 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -745,10 +745,7 @@ void __init board_prom_init(void) strcpy(cfe_version, "unknown"); printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); - if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { - printk(KERN_ERR PFX "invalid nvram checksum\n"); - return; - } + bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); board_name = bcm63xx_nvram_get_name(); /* find board by name */ diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 620611680839..a4b8864f9307 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -38,7 +38,7 @@ struct bcm963xx_nvram { static struct bcm963xx_nvram nvram; static int mac_addr_used; -int __init bcm63xx_nvram_init(void *addr) +void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; @@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) crc = crc32_le(~0, (u8 *)&nvram, check_len); if (crc != expected_crc) - return -EINVAL; - - return 0; + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", + expected_crc, crc); } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788c..35e18e98beb9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) return board_register_devices(); } -device_initcall(bcm63xx_register_devices); +arch_initcall(bcm63xx_register_devices); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d4f743..b0baa299f899 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) static void octeon_generic_shutdown(void) { - int cpu, i; + int i; +#ifdef 
CONFIG_SMP + int cpu; +#endif struct cvmx_bootmem_desc *bootmem_desc; void *named_block_array_ptr; diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b7..4e0b6bc1165e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -9,10 +9,8 @@ * * Initialized the local nvram copy from the target address and checks * its checksum. - * - * Returns 0 on success. */ -int __init bcm63xx_nvram_init(void *nvram); +void bcm63xx_nvram_init(void *nvram); /** * bcm63xx_nvram_get_name() - returns the board name according to nvram diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c828419037..193c0912d38e 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,11 +28,7 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ -#ifdef CONFIG_CPU_HAS_LLSC #define cpu_has_llsc 1 -#else -#define cpu_has_llsc 0 -#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906a..0da44d422f5b 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1166,7 +1166,10 @@ do { \ unsigned int __dspctl; \ \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " rddsp %0, %x1 \n" \ + " .set pop \n" \ : "=r" (__dspctl) \ : "i" (mask)); \ __dspctl; \ @@ -1175,30 +1178,198 @@ do { \ #define wrdsp(val, mask) \ do { \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " wrdsp %0, %x1 \n" \ + " .set pop \n" \ : \ : "r" (val), "i" (mask)); \ } while (0) -#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) -#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) -#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) -#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) - -#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) -#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) -#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) -#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) - -#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) -#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) -#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) -#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) - -#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) -#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) -#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) -#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) +#define mflo0() \ +({ \ + long mflo0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mflo0)); \ + mflo0; \ +}) + +#define mflo1() \ +({ \ + long mflo1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mflo1)); \ + mflo1; \ +}) + +#define mflo2() \ +({ \ + long mflo2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mflo2)); \ + mflo2; \ +}) + +#define mflo3() \ +({ \ + long mflo3; \ + __asm__( \ + 
" .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mflo3)); \ + mflo3; \ +}) + +#define mfhi0() \ +({ \ + long mfhi0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mfhi0)); \ + mfhi0; \ +}) + +#define mfhi1() \ +({ \ + long mfhi1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mfhi1)); \ + mfhi1; \ +}) + +#define mfhi2() \ +({ \ + long mfhi2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mfhi2)); \ + mfhi2; \ +}) + +#define mfhi3() \ +({ \ + long mfhi3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mfhi3)); \ + mfhi3; \ +}) + + +#define mtlo0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) #else diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 99fc547af9d3..eab99e536b5c 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -31,7 +31,7 @@ #define PAGE_SHIFT 16 #endif #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c201..8efe5a9e2c3e 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -21,6 +21,6 @@ #include <asm/sigcontext.h> #include <asm/siginfo.h> -#define __ARCH_HAS_ODD_SIGACTION +#define __ARCH_HAS_IRIX_SIGACTION #endif /* _ASM_SIGNAL_H */ diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3a..addb9f556b71 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. 
+ * + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever + * supported its use and no libc was using it, so the entire sa-restorer + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 + * retaining only the SA_RESTORER definition as a reminder to avoid + * accidental reuse of the mask bit. */ #define SA_ONSTACK 0x08000000 #define SA_RESETHAND 0x80000000 @@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* Only for o32 */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184c..de75fb50562b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o # -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe -# to enable DSP assembler support here even if the MIPS Release 2 CPU we -# are targetting does not support DSP because all code-paths making use of -# it properly check that the running CPU *actually does* support these -# instructions. +# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not +# safe to unconditionally use the assembler -mdsp / -mdspr2 switches +# here because the compiler may use DSP ASE instructions (such as lwx) in +# code paths where we cannot check that the CPU we are running on supports it. +# Proper abstraction using HAVE_AS_DSP and macros is done in +# arch/mips/include/asm/mipsregs.h. # ifeq ($(CONFIG_CPU_MIPSR2), y) CFLAGS_DSP = -DHAVE_AS_DSP -# -# Check if assembler supports DSP ASE -# -ifeq ($(call cc-option-yn,-mdsp), y) -CFLAGS_DSP += -mdsp -endif - -# -# Check if assembler supports DSP ASE Rev2 -# -ifeq ($(call cc-option-yn,-mdspr2), y) -CFLAGS_DSP += -mdspr2 -endif - CFLAGS_signal.o = $(CFLAGS_DSP) CFLAGS_signal32.o = $(CFLAGS_DSP) CFLAGS_process.o = $(CFLAGS_DSP) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..5fe66a0c3224 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->tlbsize = 48; break; case PRID_IMP_VR41XX: + set_isa(c, MIPS_CPU_ISA_III); + c->options = R4K_OPTS; + c->tlbsize = 32; switch (c->processor_id & 0xf0) { case PRID_REV_VR4111: c->cputype = CPU_VR4111; @@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC VR4131"; } else { c->cputype = CPU_VR4133; + c->options |= MIPS_CPU_LLSC; __cpu_name[cpu] = "NEC VR4133"; } break; @@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC Vr41xx"; break; } - set_isa(c, MIPS_CPU_ISA_III); - c->options = R4K_OPTS; - c->tlbsize = 32; break; case PRID_IMP_R4300: c->cputype = CPU_R4300; @@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; } diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 
8eeee1c860c0..db9655f08892 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, err = compat_sys_shmctl(first, second, compat_ptr(ptr)); break; default: - err = -EINVAL; + err = -ENOSYS; break; } diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 165867673357..33d067148e61 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,10 +46,9 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#else - PTR_ADDIU sp, PT_SIZE #endif -.endm + PTR_ADDIU sp, PT_SIZE + .endm .macro RETURN_BACK jr ra @@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - addiu sp,sp,8 +#ifdef CONFIG_32BIT + addiu sp,sp,8 +#else + nop +#endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccbe..7a54f74b7818 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mips_r) { seq_printf(m, "isa\t\t\t:"); if (cpu_has_mips_1) - seq_printf(m, "%s", "mips1"); + seq_printf(m, "%s", " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb87..c3abb88170fc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif - if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) status_set |= ST0_XX; if (cpu_has_dsp) status_set |= ST0_MX; diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcfdcab8..a64daee740ee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a &= ~mask; raw_local_irq_restore(flags); return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a ^= mask; raw_local_irq_restore(flags); return res; diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd41..a6adffbb4e5f 
100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -270,7 +270,7 @@ LEAF(csum_partial) #endif /* odd buffer alignment? */ -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, t7 #else @@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) addu sum, v1 #endif -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, odd #else diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7b..2078915eacb9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) return; default: - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1ba..df96da7e939b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) c->scache.flags |= MIPS_CACHE_NOT_PRESENT; /* Ignore anything but MIPSxx processors */ - if (c->isa_level != MIPS_CPU_ISA_M32R1 && - c->isa_level != MIPS_CPU_ISA_M32R2 && - c->isa_level != MIPS_CPU_ISA_M64R1 && - c->isa_level != MIPS_CPU_ISA_M64R2) + if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd67..d1faece21b6a 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -19,7 +19,7 @@ #include <asm/mach-au1x00/au1000.h> #include <asm/tlbmisc.h> -#ifdef CONFIG_DEBUG_PCI +#ifdef CONFIG_PCI_DEBUG #define DBG(x...) printk(KERN_DEBUG x) #else #define DBG(x...) do {} while (0) @@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, if (status & (1 << 29)) { *data = 0xffffffff; error = -1; - DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", access_type, bus->number, device); } else if ((status >> 28) & 0xf) { DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01d95e2f0581..113e28206503 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER endif # Use long jumps instead of long branches (needed if your linker fails to -# link a too big vmlinux executable) -cflags-$(CONFIG_MLONGCALLS) += -mlong-calls +# link a too big vmlinux executable). Not enabled for building modules. 
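[Editor's note: in the parisc Makefile hunk here, the switch from cflags-y (which is folded into KBUILD_CFLAGS and therefore reaches module builds too) to KBUILD_CFLAGS_KERNEL (applied only to objects linked into vmlinux) is what the +ifdef block just below implements; the hunk deliberately leaves modules with the default call sequence. As background on the option itself, a generic sketch with hypothetical symbol names:

/* With the default code model, a call is a single pc-relative
 * branch-and-link; linking fails if the callee ends up farther away
 * than the branch displacement can encode, a real risk for a very
 * large vmlinux.  With -mlong-calls the compiler instead materializes
 * the callee's full address in a register and branches through it,
 * which reaches anywhere at the cost of extra instructions per call. */
extern void far_away_callee(void);

void caller(void)
{
	far_away_callee();
}

end of note]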
+ifdef CONFIG_MLONGCALLS +KBUILD_CFLAGS_KERNEL += -mlong-calls +endif # select which processor to optimise for cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 79f694f3ad9b..f0e2784e7cca 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -140,7 +140,10 @@ static inline void *kmap(struct page *page) return page_address(page); } -#define kunmap(page) kunmap_parisc(page_address(page)) +static inline void kunmap(struct page *page) +{ + kunmap_parisc(page_address(page)); +} static inline void *kmap_atomic(struct page *page) { diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 7df49fad29f9..1e40d7f86be3 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -16,6 +16,8 @@ #include <asm/processor.h> #include <asm/cache.h> +extern spinlock_t pa_dbit_lock; + /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #define set_pte_at(mm, addr, ptep, pteval) \ do { \ + unsigned long flags; \ + spin_lock_irqsave(&pa_dbit_lock, flags); \ set_pte(ptep, pteval); \ purge_tlb_entries(mm, addr); \ + spin_unlock_irqrestore(&pa_dbit_lock, flags); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP + pte_t pte; + unsigned long flags; + if (!pte_young(*ptep)) return 0; - return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); -#else - pte_t pte = *ptep; - if (!pte_young(pte)) + + spin_lock_irqsave(&pa_dbit_lock, flags); + pte = *ptep; + if (!pte_young(pte)) { + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 0; - set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); + } + set_pte(ptep, pte_mkold(pte)); + purge_tlb_entries(vma->vm_mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 1; -#endif } -extern spinlock_t pa_dbit_lock; - struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; + unsigned long flags; - spin_lock(&pa_dbit_lock); + spin_lock_irqsave(&pa_dbit_lock, flags); old_pte = *ptep; pte_clear(mm,addr,ptep); - spin_unlock(&pa_dbit_lock); + purge_tlb_entries(mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP - unsigned long new, old; - - do { - old = pte_val(*ptep); - new = pte_val(pte_wrprotect(__pte (old))); - } while (cmpxchg((unsigned long *) ptep, old, new) != old); + unsigned long flags; + spin_lock_irqsave(&pa_dbit_lock, flags); + set_pte(ptep, pte_wrprotect(*ptep)); purge_tlb_entries(mm, addr); -#else - pte_t old_pte = *ptep; - set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); -#endif + spin_unlock_irqrestore(&pa_dbit_lock, flags); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4ba2c93770f1..e0a82358517e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -181,30 +181,24 @@ struct exception_data { #if !defined(CONFIG_64BIT) #define 
__put_kernel_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%1)" \ - "\n2:\tstw %3,4(%1)\n\t" \ + "\n2:\tstw %R2,4(%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) #define __put_user_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%%sr3,%1)" \ - "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ + "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 4b12890642eb..83ded26cad06 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) /* Note: purge_tlb_entries can be called at startup with no context. */ - /* Disable preemption while we play with %sr1. */ - preempt_disable(); - mtsp(mm->context, 1); purge_tlb_start(flags); + mtsp(mm->context, 1); pdtlb(addr); pitlb(addr); purge_tlb_end(flags); - preempt_enable(); } EXPORT_SYMBOL(purge_tlb_entries); diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 6795dc6c995f..568b2c61ea02 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -120,11 +120,13 @@ extern void __ashrdi3(void); extern void __ashldi3(void); extern void __lshrdi3(void); extern void __muldi3(void); +extern void __ucmpdi2(void); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); +EXPORT_SYMBOL(__ucmpdi2); asmlinkage void * __canonicalize_funcptr_for_compare(void *); EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 5f2e6904d14a..5651536ac733 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -2,6 +2,7 @@ # Makefile for parisc-specific library files # -lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o +lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ + ucmpdi2.o obj-y := iomap.o diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c new file mode 100644 index 000000000000..149c016f32c5 --- /dev/null +++ b/arch/parisc/lib/ucmpdi2.c @@ -0,0 +1,25 @@ +#include <linux/module.h> + +union ull_union { + unsigned long long ull; + struct { + unsigned int high; + unsigned int low; + } ui; +}; + +int __ucmpdi2(unsigned long long a, unsigned long long b) +{ + union ull_union au = {.ull = a}; + union ull_union bu = {.ull = b}; + + if (au.ui.high < bu.ui.high) + return 0; + else if (au.ui.high > bu.ui.high) + return 2; + if (au.ui.low < bu.ui.low) + return 0; + else if (au.ui.low > bu.ui.low) + return 2; + return 1; +} diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 256c5bf0adb7..04d69c4a5ac2 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -304,7 +304,7 @@ syscall_exit_work: subi r12,r12,TI_FLAGS 4: /* Anything else left to do? 
*/ - SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ + SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) beq .ret_from_except_lite @@ -657,7 +657,7 @@ resume_kernel: /* Clear _TIF_EMULATE_STACK_STORE flag */ lis r11,_TIF_EMULATE_STACK_STORE@h addi r5,r9,TI_FLAGS - ldarx r4,0,r5 +0: ldarx r4,0,r5 andc r4,r4,r11 stdcx. r4,0,r5 bne- 0b diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 59dd545fdde1..16e77a81ab4f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new) new->thread.regs->msr |= (MSR_FP | new->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&new->thread); new->thread.regs->msr |= MSR_VEC; } +#endif /* We may as well turn on VSX too since all the state is restored now */ if (msr & MSR_VSX) new->thread.regs->msr |= MSR_VSX; diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3acb28e245b4..95068bf569ad 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs, do_load_up_transact_fpu(&current->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&current->thread); regs->msr |= MSR_VEC; } +#endif return 0; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 995f8543cb57..c1794286098c 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, do_load_up_transact_fpu(&current->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&current->thread); regs->msr |= MSR_VEC; } +#endif return err; } diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 84dbace657ce..2da67e7a16d5 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint) or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ mtmsr r5 +#ifdef CONFIG_ALTIVEC /* FP and VEC registers: These are recheckpointed from thread.fpr[] * and thread.vr[] respectively. The thread.transact_fpr[] version * is more modern, and will be loaded subsequently by any FPUnavailable @@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint) REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ ld r5, THREAD_VRSAVE(r3) mtspr SPRN_VRSAVE, r5 +#endif dont_restore_vec: andi. 
r0, r4, MSR_FP diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 41cefd43655f..33db48a8ce24 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -26,17 +26,20 @@ #define E500_PID_NUM 3 #define E500_TLB_NUM 2 -#define E500_TLB_VALID 1 -#define E500_TLB_BITMAP 2 +/* entry is mapped somewhere in host TLB */ +#define E500_TLB_VALID (1 << 0) +/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ +#define E500_TLB_BITMAP (1 << 1) +/* TLB1 entry is mapped by host TLB0 */ #define E500_TLB_TLB0 (1 << 2) struct tlbe_ref { - pfn_t pfn; - unsigned int flags; /* E500_TLB_* */ + pfn_t pfn; /* valid only for TLB0, except briefly */ + unsigned int flags; /* E500_TLB_* */ }; struct tlbe_priv { - struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ + struct tlbe_ref ref; }; #ifdef CONFIG_KVM_E500V2 @@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 { unsigned int gtlb_nv[E500_TLB_NUM]; - /* - * information associated with each host TLB entry -- - * TLB1 only for now. If/when guest TLB1 entries can be - * mapped with host TLB0, this will be used for that too. - * - * We don't want to use this for guest TLB0 because then we'd - * have the overhead of doing the translation again even if - * the entry is still in the guest TLB (e.g. we swapped out - * and back, and our host TLB entries got evicted). - */ - struct tlbe_ref *tlb_refs[E500_TLB_NUM]; unsigned int host_tlb1_nv; u32 svr; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index a222edfb9a9b..1c6a9d729df4 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ - if (!(ref->flags & E500_TLB_VALID)) - return; + if (!(ref->flags & E500_TLB_VALID)) { + WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), + "%s: flags %x\n", __func__, ref->flags); + WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); + } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; @@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, pfn_t pfn) { ref->pfn = pfn; - ref->flags = E500_TLB_VALID; + ref->flags |= E500_TLB_VALID; if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); @@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { + /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } @@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { - int tlbsel = 0; - int i; - - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->gtlb_priv[tlbsel][i].ref; - kvmppc_e500_ref_release(ref); - } -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int stlbsel = 1; + int tlbsel; int i; - kvmppc_e500_tlbil_all(vcpu_e500); - - for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->tlb_refs[stlbsel][i]; - kvmppc_e500_ref_release(ref); + for (tlbsel = 0; tlbsel <= 1; tlbsel++) { + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } } - 
- clear_tlb_privs(vcpu_e500); } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - clear_tlb_refs(vcpu_e500); + kvmppc_e500_tlbil_all(vcpu_e500); + clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } @@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } - /* Drop old ref and setup new one. */ - kvmppc_e500_ref_release(ref); kvmppc_e500_ref_setup(ref, gtlbe, pfn); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, @@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; - vcpu_e500->tlb_refs[1][sesel] = *ref; - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } - vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; + vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; + WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } @@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { - struct tlbe_ref ref; + struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; - ref.flags = 0; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, - &ref); + ref); if (r) return r; @@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, } /* Otherwise map into TLB1 */ - sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; @@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - /* Triggers after clear_tlb_refs or on initial mapping */ + /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { @@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; - vcpu_e500->tlb_refs[0] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[0]) - goto err; - - vcpu_e500->tlb_refs[1] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[1]) - goto err; - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * host_tlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) - goto err; + return -EINVAL; return 0; - -err: - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); - return -EINVAL; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 1f89d26e65fb..2f4baa074b2e 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -108,6 +108,8 @@ 
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { } +static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); + void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); mtspr(SPRN_GESR, vcpu->arch.shared->esr); - if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) + if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || + __get_cpu_var(last_vcpu_on_cpu) != vcpu) { kvmppc_e500_tlbil_all(vcpu_e500); + __get_cpu_var(last_vcpu_on_cpu) = vcpu; + } kvmppc_load_guest_fp(vcpu); } diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fed355a..299731e9036b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - BUG_ON(lpar_rc != H_NOT_FOUND); + + /* + * The test for adjunct partition is performed before the + * ANDCOND test. H_RESOURCE may be returned, so we need to + * check for that as well. + */ + BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 27cb32185ce1..379d96e2105e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); #define ioremap_nocache(addr, size) ioremap(addr, size) #define ioremap_wc ioremap_nocache -/* TODO: s390 cannot support io_remap_pfn_range... */ -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { return (void __iomem *) offset; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d43..3cb47cf02530 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -57,6 +57,10 @@ extern unsigned long zero_page_mask; (((unsigned long)(vaddr)) &zero_page_mask)))) #define __HAVE_COLOR_ZERO_PAGE +/* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* @@ -344,6 +348,7 @@ extern unsigned long MODULES_END; #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ @@ -1531,7 +1536,8 @@ extern int s390_enable_sie(void); /* * No page table caches to initialise */ -#define pgtable_cache_init() do { } while (0) +static inline void pgtable_cache_init(void) { } +static inline void check_pgt_cache(void) { } #include <asm-generic/pgtable.h> diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b45..466fb3383960 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address * contains the (negative) exception code. 
*/ -static __always_inline unsigned long follow_table(struct mm_struct *mm, - unsigned long addr, int write) +#ifdef CONFIG_64BIT +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; + unsigned long *table = (unsigned long *)__pa(mm->pgd); + + switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: + table = table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x39UL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION2: + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3aUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION3: + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3bUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_SEGMENT: + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) + return -0x10UL; + if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { + if (write && (*table & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + + (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); + } + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + } + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) + return -0x11UL; + if (write && (*table & _PAGE_RO)) + return -0x04UL; + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); +} - pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return -0x3aUL; +#else /* CONFIG_64BIT */ - pud = pud_offset(pgd, addr); - if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return -0x3bUL; +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) +{ + unsigned long *table = (unsigned long *)__pa(mm->pgd); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) return -0x10UL; - if (pmd_large(*pmd)) { - if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) - return -0x04UL; - return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); - } - if (unlikely(pmd_bad(*pmd))) - return -0x10UL; - - ptep = pte_offset_map(pmd, addr); - if (!pte_present(*ptep)) + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) return -0x11UL; - if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) + if (write && (*table & _PAGE_RO)) return -0x04UL; - - return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); } +#endif /* CONFIG_64BIT */ + static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { @@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) static size_t clear_user_pt(size_t n, void __user *to) { - void *zpage = &empty_zero_page; + void *zpage = (void *) empty_zero_page; long done, size, ret; done = 0; diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index e26d430ce2fd..ff18e3cfb6b1 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,11 +2,16 @@ generic-y += clkdev.h +generic-y += cputime.h generic-y += div64.h +generic-y += emergency-restart.h generic-y += exec.h generic-y += local64.h +generic-y += 
mutex.h generic-y += irq_regs.h generic-y += local.h generic-y += module.h +generic-y += serial.h generic-y += trace_clock.h +generic-y += types.h generic-y += word-at-a-time.h diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h deleted file mode 100644 index 1a642b81e019..000000000000 --- a/arch/sparc/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_CPUTIME_H -#define __SPARC_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __SPARC_CPUTIME_H */ diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sparc/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/arch/sparc/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 08fcce90316b..7619f2f792af 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } +#include <asm/tlbflush.h> #include <asm-generic/pgtable.h> /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h deleted file mode 100644 index f90d61c28059..000000000000 --- a/arch/sparc/include/asm/serial.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_SERIAL_H -#define __SPARC_SERIAL_H - -#define BASE_BAUD ( 1843200 / 16 ) - -#endif /* __SPARC_SERIAL_H */ diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index b73da3c5f10a..3c8917f054de 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); void cpu_panic(void); -extern void smp4m_irq_rotate(int cpu); /* * General functions that each host system must provide. @@ -46,7 +45,6 @@ void sun4m_init_smp(void); void sun4d_init_smp(void); void smp_callin(void); -void smp_boot_cpus(void); void smp_store_cpu_info(int); void smp_resched_interrupt(void); @@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) -#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier -#define prof_counter(__cpu) cpu_data(__cpu).counter - void smp_setup_cpu_possible_map(void); #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index cad36f56fa03..c7de3323819c 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -18,8 +18,7 @@ do { \ * and 2 stores in this critical code path. 
-DaveM */ #define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ +do { save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef463494153..f0d6a9700f4c 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; + unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. */ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end) \ - do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr) flush_tlb_pending() -#define flush_tlb_mm(mm) flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode() do {} while (0) /* Local cpu only. */ extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} + #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) +#define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + #endif /* ! CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ce175aff71b7..b5843ee09fb5 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -44,7 +44,6 @@ header-y += swab.h header-y += termbits.h header-y += termios.h header-y += traps.h -header-y += types.h header-y += uctx.h header-y += unistd.h header-y += utrap.h diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h deleted file mode 100644 index 383d156cde9c..000000000000 --- a/arch/sparc/include/uapi/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _SPARC_TYPES_H -#define _SPARC_TYPES_H -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. 
- */ - -#if defined(__sparc__) - -#include <asm-generic/int-ll64.h> - -#endif /* defined(__sparc__) */ - -#endif /* defined(_SPARC_TYPES_H) */ diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 537eb66abd06..ca64d2a86ec0 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_fetch_glob_pmu; @@ -1074,23 +1074,56 @@ local_flush_and_out: put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 48d00e72ce15..8ec4e9c0251a 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len) void bit_map_init(struct bit_map *t, unsigned long *map, int size) { - - if ((size & 07) != 0) - BUG(); - memset(map, 0, size>>3); - + bitmap_zero(map, size); memset(t, 0, sizeof *t); spin_lock_init(&t->lock); t->map = map; diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 0f4f7191fbba..28f96f27c768 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -34,7 +34,7 @@ #define IOMMU_RNGE IOMMU_RNGE_256MB #define IOMMU_START 0xF0000000 #define IOMMU_WINSIZE (256*1024*1024U) -#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */ +#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ /* srmmu.c */ diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c38bb72e3e80..036c2797dece 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void) SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); - srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); + srmmu_nocache_bitmap = + __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = 
__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index ba6ae7ffdc2c..83d89bcb44af 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); + struct mm_struct *mm = tb->mm; - if (tb->tlb_nr) { - flush_tsb_user(tb); + if (!tb->tlb_nr) + goto out; - if (CTX_VALID(tb->mm->context)) { + flush_tsb_user(tb); + + if (CTX_VALID(mm->context)) { + if (tb->tlb_nr == 1) { + global_flush_tlb_page(mm, tb->vaddrs[0]); + } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } - tb->tlb_nr = 0; } + tb->tlb_nr = 0; + +out: put_cpu_var(tlb_batch); } +void arch_enter_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + tb->active = 1; +} + +void arch_leave_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + if (tb->tlb_nr) + flush_tlb_pending(); + tb->active = 0; +} + static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, bool exec) { @@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, nr = 0; } + if (!tb->active) { + global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); + goto out; + } + if (nr == 0) tb->mm = mm; @@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, if (nr >= TLB_BATCH_NR) flush_tlb_pending(); +out: put_cpu_var(tlb_batch); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 428982b9becf..2cc3bce5ee91 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -7,11 +7,10 @@ #include <linux/preempt.h> #include <linux/slab.h> #include <asm/page.h> -#include <asm/tlbflush.h> -#include <asm/tlb.h> -#include <asm/mmu_context.h> #include <asm/pgtable.h> +#include <asm/mmu_context.h> #include <asm/tsb.h> +#include <asm/tlb.h> #include <asm/oplib.h> extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, - unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries) { - unsigned long i; + unsigned long tag, ent, hash; - for (i = 0; i < tb->tlb_nr; i++) { - unsigned long v = tb->vaddrs[i]; - unsigned long tag, ent, hash; + v &= ~0x1UL; + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); - v &= ~0x1UL; + tsb_flush(ent, tag); +} - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries) +{ + unsigned long i; - tsb_flush(ent, tag); - } + for (i = 0; i < tb->tlb_nr; i++) + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } void flush_tsb_user(struct tlb_batch *tb) @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long nentries, base, flags; + + spin_lock_irqsave(&mm->context.lock, flags); + + base = (unsigned long) 
mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + } +#endif + spin_unlock_irqrestore(&mm->context.lock, flags); +} + #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index f8e13d421fcb..432aa0cb1b38 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */ nop .align 32 + .globl __flush_tlb_page +__flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, %pstate + mov SECONDARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + andn %o1, 1, %o3 + be,pn %icc, 1f + or %o3, 0x10, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + retl + wrpr %g7, 0x0, %pstate + nop + nop + nop + nop + + .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + be,pn %icc, 1f + andn %o1, 1, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop +__hypervisor_flush_tlb_page: /* 11 insns */ + /* %o0 = context, %o1 = vaddr */ + mov %o0, %g2 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g2, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + retl + nop + __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_pending 
-xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=context, %g1=nr, %g7=vaddrs[] */ - sllx %g1, 3, %g1 + .globl xcall_flush_tlb_page +xcall_flush_tlb_page: /* 17 insns */ + /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %g5 - andcc %g5, 0x1, %g0 + andcc %g1, 0x1, %g0 be,pn %icc, 2f - - andn %g5, 0x1, %g5 + andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync - brnz,pt %g1, 1b - nop stxa %g2, [%g4] ASI_DMMU retry nop + nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ - sllx %g1, 3, %g1 + .globl __hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ + /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 - brnz,pt %g1, 1b - nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__hypervisor_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 + call tlb_patch_one + mov 11, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_pending), %o0 - or %o0, %lo(xcall_flush_tlb_pending), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + sethi %hi(xcall_flush_tlb_page), %o0 + or %o0, %lo(xcall_flush_tlb_page), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 21, %o2 + mov 17, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb60b12..c96f9bbb760d 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -40,7 +40,15 @@ #include <asm/percpu.h> #include <arch/spr_def.h> -/* Set and clear kernel interrupt masks. */ +/* + * Set and clear kernel interrupt masks. + * + * NOTE: __insn_mtspr() is a compiler builtin marked as a memory + * clobber. We rely on it being equivalent to a compiler barrier in + * this code since arch_local_irq_save() and friends must act as + * compiler barriers. This compiler semantic is baked into enough + * places that the compiler will maintain it going forward. 
+ */ #if CHIP_HAS_SPLIT_INTR_MASK() #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 # error Fix assumptions about which word various interrupts are in diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c6..7a5aa1a7864e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD -/* - * Note that the kernel can potentially support other compression - * techniques than gz, though we don't do so by default. If we ever - * decide to do so we can either look for other filename extensions, - * or just allow a file with this name to be compressed with an - * arbitrary compressor (somewhat counterintuitively). - */ static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; +static char __initdata initramfs_file[128] = "initramfs"; static int __init setup_initramfs_file(char *str) { @@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for an "initramfs.cpio.gz" file in the hvfs. - * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs. + * We look for a file called "initramfs" in the hvfs. If there is one, we + * allocate some memory for it and it will be unpacked to the initramfs. + * If it's compressed, the initrd code will uncompress it first. */ static void __init load_hv_initrd(void) { @@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { - if (set_initramfs_file) + if (set_initramfs_file) { pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); - return; + return; + } else { + /* Try old backwards-compatible name. */ + fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); + if (fd == HV_ENOENT) + return; + } } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 70c0f3da0476..15b5cef4aa38 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1549,6 +1549,7 @@ config X86_SMAP config EFI bool "EFI runtime service support" depends on ACPI + select UCS2_STRING ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). 
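The load_hv_initrd() change above follows a common compatibility pattern: try the new default name first, fail loudly only when the user explicitly configured a file, and otherwise fall back quietly to the legacy name. A minimal standalone sketch of that control flow, where find_file() and FILE_NOT_FOUND are hypothetical stand-ins for hv_fs_findfile() and HV_ENOENT:

#include <stdio.h>
#include <string.h>

#define FILE_NOT_FOUND (-1)	/* stand-in for HV_ENOENT */

/* Hypothetical lookup: pretend only the legacy name exists. */
static int find_file(const char *name)
{
	return strcmp(name, "initramfs.cpio.gz") == 0 ? 3 : FILE_NOT_FOUND;
}

static int open_initramfs(const char *name, int user_set_name)
{
	int fd = find_file(name);

	if (fd == FILE_NOT_FOUND) {
		if (user_set_name) {
			/* An explicitly configured name must exist. */
			fprintf(stderr, "No such file '%s'\n", name);
			return -1;
		}
		/* Default name missing: try the old backwards-compatible one. */
		fd = find_file("initramfs.cpio.gz");
		if (fd == FILE_NOT_FOUND)
			return -1;	/* no initramfs at all; not fatal */
	}
	return fd;
}

int main(void)
{
	printf("fd = %d\n", open_initramfs("initramfs", 0));
	return 0;
}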
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1b..5ef205c5f37b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += vmlinux.bin.all vmlinux.relocs +targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035a6b96..35ee62fccf98 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -251,6 +251,52 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) *size = len; } +static efi_status_t setup_efi_vars(struct boot_params *params) +{ + struct setup_data *data; + struct efi_var_bootdata *efidata; + u64 store_size, remaining_size, var_size; + efi_status_t status; + + if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) + return EFI_UNSUPPORTED; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, &store_size, + &remaining_size, &var_size); + + if (status != EFI_SUCCESS) + return status; + + status = efi_call_phys3(sys_table->boottime->allocate_pool, + EFI_LOADER_DATA, sizeof(*efidata), &efidata); + + if (status != EFI_SUCCESS) + return status; + + efidata->data.type = SETUP_EFI_VARS; + efidata->data.len = sizeof(struct efi_var_bootdata) - + sizeof(struct setup_data); + efidata->data.next = 0; + efidata->store_size = store_size; + efidata->remaining_size = remaining_size; + efidata->max_var_size = var_size; + + if (data) + data->next = (unsigned long)efidata; + else + params->hdr.setup_data = (unsigned long)efidata; + + return EFI_SUCCESS; +} + static efi_status_t setup_efi_pci(struct boot_params *params) { efi_pci_io_protocol *pci; @@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, setup_graphics(boot_params); + setup_efi_vars(boot_params); + setup_efi_pci(boot_params); status = efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 60c89f30c727..2fb5d5884e23 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long 
size); +struct efi_var_bootdata { + struct setup_data data; + u64 store_size; + u64 remaining_size; + u64 max_var_size; +}; + #ifdef CONFIG_EFI static inline bool efi_is_native(void) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5edd1742cfd0..7361e47db79f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ + PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +} static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 142236ed83af..b3b0ec1dac86 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,6 +91,7 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. */ void (*enter)(void); void (*leave)(void); + void (*flush)(void); }; struct pv_time_ops { @@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b62592..2e188d68397c 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax & __SYSCALL_MASK; + return regs->orig_ax; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax & __SYSCALL_MASK; + regs->ax = regs->orig_ax; } static inline long syscall_get_error(struct task_struct *task, diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4fef20773b8f..c7797307fc2b 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -7,7 +7,7 @@ #define tlb_flush(tlb) \ { \ - if (tlb->fullmm == 0) \ + if (!tlb->fullmm && !tlb->need_flush_all) \ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ else \ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..08744242b8d2 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI_VARS 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a7d26d83fb70..8f4be53ea04b 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void) if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return false; - /* - * Xen emulates Hyper-V to support enlightened Windows. - * Check to see first if we are on a Xen Hypervisor. 
- */ - if (xen_cpuid_base()) - return false; - cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); @@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void) if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); -#if IS_ENABLED(CONFIG_HYPERV) - /* - * Setup the IDT for hypervisor callback. - */ - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); -#endif } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { @@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr; void hv_register_vmbus_handler(int irq, irq_handler_t handler) { + /* + * Setup the IDT for hypervisor callback. + */ + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); + vmbus_irq = irq; vmbus_isr = handler; } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index dab7580c47ae..cc45deb791b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = }; static struct extra_reg intel_snb_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), + EVENT_EXTRA_END +}; + +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), EVENT_EXTRA_END }; @@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 45) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; @@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_ivb_event_constraints; x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 62) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b05a575d56f4..26830f3af0df 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void) if (top <= at) return 0; + memset(®s, 0, sizeof(regs)); + ds->bts_index = ds->bts_buffer_base; perf_sample_data_init(&data, 0, event->hw.last_period); - regs.ip = 0; /* * Prepare a generic sample, i.e. fill in the invariant fields. 
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 577db8417d15..833d51d6ee06 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c @@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void) u32 eax = 0x00000000; u32 ebx, ecx = 0, edx; - if (!have_cpuid_p()) - return X86_VENDOR_UNKNOWN; - native_cpuid(&eax, &ebx, &ecx, &edx); if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) @@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void) return X86_VENDOR_UNKNOWN; } +static int __cpuinit x86_family(void) +{ + u32 eax = 0x00000001; + u32 ebx, ecx = 0, edx; + int x86; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + x86 = (eax >> 8) & 0xf; + if (x86 == 15) + x86 += (eax >> 20) & 0xff; + + return x86; +} + void __init load_ucode_bsp(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; - if (vendor == X86_VENDOR_INTEL) + vendor = x86_vendor(); + x86 = x86_family(); + + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_bsp(); } void __cpuinit load_ucode_ap(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; + + vendor = x86_vendor(); + x86 = x86_family(); - if (vendor == X86_VENDOR_INTEL) + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_ap(); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 17fff18a1031..8bfb335f74bb 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } +void paravirt_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return this_cpu_read(paravirt_lazy_mode); } -void arch_flush_lazy_mmu_mode(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, + .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 90d8cc930f5e..fae9134a2de9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void) /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. + * On 64bit, old kexec-tools need the crash kernel to be under 896MiB. 
*/ #ifdef CONFIG_X86_32 -# define CRASH_KERNEL_ADDR_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) #else -# define CRASH_KERNEL_ADDR_MAX MAXMEM +# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) +# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM #endif static void __init reserve_crashkernel_low(void) @@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void) unsigned long long low_base = 0, low_size = 0; unsigned long total_low_mem; unsigned long long base; + bool auto_set = false; int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); + /* crashkernel=Y,low */ ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); - if (ret != 0 || low_size <= 0) - return; + if (ret != 0) { + /* + * two parts from lib/swiotlb.c: + * swiotlb size: user specified with swiotlb= or default. + * swiotlb overflow buffer: now is hardcoded to 32k. + * We round it to 8M for other buffers that + * may need to stay low too. + */ + low_size = swiotlb_size_or_default() + (8UL<<20); + auto_set = true; + } else { + /* passed with crashkernel=0,low ? */ + if (!low_size) + return; + } low_base = memblock_find_in_range(low_size, (1ULL<<32), low_size, alignment); if (!low_base) { - pr_info("crashkernel low reservation failed - No suitable area found.\n"); + if (!auto_set) + pr_info("crashkernel low reservation failed - No suitable area found.\n"); return; } @@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void) const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long total_mem; unsigned long long crash_size, crash_base; + bool high = false; int ret; total_mem = memblock_phys_mem_size(); + /* crashkernel=XM */ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); - if (ret != 0 || crash_size <= 0) - return; + if (ret != 0 || crash_size <= 0) { + /* crashkernel=X,high */ + ret = parse_crashkernel_high(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret != 0 || crash_size <= 0) + return; + high = true; + } /* 0 means: find the address automatically */ if (crash_base <= 0) { @@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void) * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX */ crash_base = memblock_find_in_range(alignment, - CRASH_KERNEL_ADDR_MAX, crash_size, alignment); + high ? 
CRASH_KERNEL_ADDR_HIGH_MAX : + CRASH_KERNEL_ADDR_LOW_MAX, + crash_size, alignment); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4ad..f77df1c5de6e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr); + addr, sizeof(u8)); } void kvm_lapic_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f19ac0aca60d..e1721324c271 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) gpa_offset = data & ~(PAGE_MASK | 1); - /* Check that the address is 32-byte aligned. */ - if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) - break; - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.pv_time, data & ~1ULL)) + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; @@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS)) + data & KVM_STEAL_VALID_BITS, + sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cbd89ca5569..7114c63f047d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1334,6 +1334,7 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; + pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b97525246d4..0e883364abb5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); - else + arch_flush_lazy_mmu_mode(); + } else { BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } /* * Below here mismatches are bugs because these lower tables diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index b0086567271c..0e38951e65eb 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -68,7 +68,7 @@ static int print_split(struct split_state *s) s->gpg++; i += GPS/PAGE_SIZE; } else if (level == PG_LEVEL_2M) { - if (!(pte_val(*pte) & _PAGE_PSE)) { + if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) { printk(KERN_ERR "%lx level %d but not PSE %Lx\n", addr, level, (u64)pte_val(*pte)); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 091934e1d0d9..fb4e73ec24d8 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -467,7 +467,7 @@ 
try_preserve_large_page(pte_t *kpte, unsigned long address, * We are safe now. Check whether the new pgprot is the same: */ old_pte = *kpte; - old_prot = new_prot = req_prot = pte_pgprot(old_pte); + old_prot = req_prot = pte_pgprot(old_pte); pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); @@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL * for the ancient hardware that doesn't support it. */ - if (pgprot_val(new_prot) & _PAGE_PRESENT) - pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; + if (pgprot_val(req_prot) & _PAGE_PRESENT) + pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; else - pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); + pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); - new_prot = canon_pgprot(new_prot); + req_prot = canon_pgprot(req_prot); /* * old_pte points to the large page base address. So we need @@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable) * but that can deadlock->flush only current cpu: */ __flush_tlb_all(); + + arch_flush_lazy_mmu_mode(); } #ifdef CONFIG_HIBERNATION diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 193350b51f90..17fda6a8b3c2 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); + /* + * NOTE! For PAE, any changes to the top page-directory-pointer-table + * entries need a full cr3 reload to flush. + */ +#ifdef CONFIG_X86_PAE + tlb->need_flush_all = 1; +#endif tlb_remove_page(tlb, virt_to_page(pmd)); } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index fff986da6239..012b3f6a9bd6 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -41,6 +41,7 @@ #include <linux/io.h> #include <linux/reboot.h> #include <linux/bcd.h> +#include <linux/ucs2_string.h> #include <asm/setup.h> #include <asm/efi.h> @@ -51,6 +52,13 @@ #define EFI_DEBUG 1 +/* + * There's some additional metadata associated with each + * variable. 
Intel's reference implementation is 60 bytes - bump that + * to account for potential alignment constraints + */ +#define VAR_METADATA_SIZE 64 + struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, @@ -69,6 +77,13 @@ struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static u64 efi_var_store_size; +static u64 efi_var_remaining_size; +static u64 efi_var_max_var_size; +static u64 boot_used_size; +static u64 boot_var_size; +static u64 active_size; + unsigned long x86_efi_facility; /* @@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg) } early_param("add_efi_memmap", setup_add_efi_memmap); +static bool efi_no_storage_paranoia; + +static int __init setup_storage_paranoia(char *arg) +{ + efi_no_storage_paranoia = true; + return 0; +} +early_param("efi_no_storage_paranoia", setup_storage_paranoia); + static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { @@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { - return efi_call_virt3(get_next_variable, - name_size, name, vendor); + efi_status_t status; + static bool finished = false; + static u64 var_size; + + status = efi_call_virt3(get_next_variable, + name_size, name, vendor); + + if (status == EFI_NOT_FOUND) { + finished = true; + if (var_size < boot_used_size) { + boot_var_size = boot_used_size - var_size; + active_size += boot_var_size; + } else { + printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); + } + } + + if (boot_used_size && !finished) { + unsigned long size; + u32 attr; + efi_status_t s; + void *tmp; + + s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); + + if (s != EFI_BUFFER_TOO_SMALL || !size) + return status; + + tmp = kmalloc(size, GFP_ATOMIC); + + if (!tmp) + return status; + + s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); + + if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { + var_size += size; + var_size += ucs2_strsize(name, 1024); + active_size += size; + active_size += VAR_METADATA_SIZE; + active_size += ucs2_strsize(name, 1024); + } + + kfree(tmp); + } + + return status; } static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, unsigned long data_size, void *data) { - return efi_call_virt5(set_variable, - name, vendor, attr, - data_size, data); + efi_status_t status; + u32 orig_attr = 0; + unsigned long orig_size = 0; + + status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, + NULL); + + if (status != EFI_BUFFER_TOO_SMALL) + orig_size = 0; + + status = efi_call_virt5(set_variable, + name, vendor, attr, + data_size, data); + + if (status == EFI_SUCCESS) { + if (orig_size) { + active_size -= orig_size; + active_size -= ucs2_strsize(name, 1024); + active_size -= VAR_METADATA_SIZE; + } + if (data_size) { + active_size += data_size; + active_size += ucs2_strsize(name, 1024); + active_size += VAR_METADATA_SIZE; + } + } + + return status; } static efi_status_t virt_efi_query_variable_info(u32 attr, @@ -683,6 +777,9 @@ void __init efi_init(void) char vendor[100] = "unknown"; int i = 0; void *tmp; + struct setup_data *data; + struct efi_var_bootdata *efi_var_data; + u64 pa_data; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -700,6 +797,22 @@ void __init efi_init(void) if (efi_systab_init(efi_phys.systab)) return; + pa_data = 
@@ -683,6 +777,9 @@ void __init efi_init(void)
 	char vendor[100] = "unknown";
 	int i = 0;
 	void *tmp;
+	struct setup_data *data;
+	struct efi_var_bootdata *efi_var_data;
+	u64 pa_data;
 
 #ifdef CONFIG_X86_32
 	if (boot_params.efi_info.efi_systab_hi ||
@@ -700,6 +797,22 @@ void __init efi_init(void)
 	if (efi_systab_init(efi_phys.systab))
 		return;
 
+	pa_data = boot_params.hdr.setup_data;
+	while (pa_data) {
+		data = early_ioremap(pa_data, sizeof(*efi_var_data));
+		if (data->type == SETUP_EFI_VARS) {
+			efi_var_data = (struct efi_var_bootdata *)data;
+
+			efi_var_store_size = efi_var_data->store_size;
+			efi_var_remaining_size = efi_var_data->remaining_size;
+			efi_var_max_var_size = efi_var_data->max_var_size;
+		}
+		pa_data = data->next;
+		early_iounmap(data, sizeof(*efi_var_data));
+	}
+
+	boot_used_size = efi_var_store_size - efi_var_remaining_size;
+
 	set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
 	/*
@@ -1000,3 +1113,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
 	}
 	return 0;
 }
+
+/*
+ * Some firmware has serious problems when using more than 50% of the EFI
+ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
+ * we never use more than this safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+	efi_status_t status;
+	u64 storage_size, remaining_size, max_size;
+
+	status = efi.query_variable_info(attributes, &storage_size,
+					 &remaining_size, &max_size);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	if (!max_size && remaining_size > size)
+		printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+			    " is returning MaxVariableSize=0\n");
+	/*
+	 * Some firmware implementations refuse to boot if there's insufficient
+	 * space in the variable store. We account for that by refusing the
+	 * write if permitting it would reduce the available space to under
+	 * 50%. However, some firmware won't reclaim variable space until
+	 * after the used (not merely the actively used) space drops below
+	 * a threshold. We can approximate that case with the value calculated
+	 * above. If both the firmware and our calculations indicate that the
+	 * available space would drop below 50%, refuse the write.
+	 */
+
+	if (!storage_size || size > remaining_size ||
+	    (max_size && size > max_size))
+		return EFI_OUT_OF_RESOURCES;
+
+	if (!efi_no_storage_paranoia &&
+	    ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
+	     (remaining_size - size < storage_size / 2)))
+		return EFI_OUT_OF_RESOURCES;
+
+	return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
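efi_query_variable_store() refuses a write only when both estimates agree: the kernel's own active_size accounting and the firmware's remaining_size must each indicate that free space would fall below half the store. A plain C sketch of that two-sided test follows (the max_size check is omitted for brevity; this is an illustration, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VAR_METADATA_SIZE 64

    /* storage/remaining mirror what QueryVariableInfo() reports, active is
     * the kernel's running estimate, size is the pending write. */
    static bool write_allowed(uint64_t storage, uint64_t remaining,
                              uint64_t active, uint64_t size)
    {
            if (!storage || size > remaining)
                    return false;
            /* refuse only when BOTH estimates cross the 50% line */
            if (active + size + VAR_METADATA_SIZE > storage / 2 &&
                remaining - size < storage / 2)
                    return false;
            return true;
    }

    int main(void)
    {
            /* 64 KiB store, 20 KiB reported free, 30 KiB actively used:
             * a 12 KiB write fails both tests and is refused. */
            printf("%s\n",
                   write_allowed(64 * 1024, 20 * 1024, 30 * 1024, 12 * 1024)
                           ? "allowed" : "refused");
            return 0;
    }

Requiring both conditions keeps the kernel from rejecting writes on firmware that reports a pessimistic remaining_size, while still protecting machines whose firmware bricks past the 50% mark.
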
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6afbb2ca9a0a..e006c18d288a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
 }
 
 /* Set the page permissions on an identity-mapped pages */
-static void set_page_prot(void *addr, pgprot_t prot)
+static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
 {
 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
 	pte_t pte = pfn_pte(pfn, prot);
 
-	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
 		BUG();
 }
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+	return set_page_prot_flags(addr, prot, UVMF_NONE);
+}
 
 #ifdef CONFIG_X86_32
 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
@@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
 				 unsigned long addr)
 {
 	if (*pt_base == PFN_DOWN(__pa(addr))) {
-		set_page_prot((void *)addr, PAGE_KERNEL);
+		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
 		clear_page((void *)addr);
 		(*pt_base)++;
 	}
 	if (*pt_end == PFN_DOWN(__pa(addr))) {
-		set_page_prot((void *)addr, PAGE_KERNEL);
+		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
 		clear_page((void *)addr);
 		(*pt_end)--;
 	}
@@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.lazy_mode = {
 		.enter = paravirt_enter_lazy_mmu,
 		.leave = xen_leave_lazy_mmu,
+		.flush = paravirt_flush_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,
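The mmu.c change above is a common refactoring: the existing entry point keeps its signature and becomes a thin wrapper that forwards a default flag to a new, more general helper, while the call sites that immediately reuse the page pass UVMF_INVLPG for an immediate single-address flush. A small sketch of the pattern, with stand-in names and illustrative flag values rather than Xen's actual hypercall interface:

    #include <stdio.h>

    #define UVMF_NONE   0UL     /* no flush requested - illustrative value */
    #define UVMF_INVLPG 2UL     /* flush one address - illustrative value */

    /* Stand-in for the remapping primitive; the caller picks the policy. */
    static void update_mapping(void *addr, unsigned long flags)
    {
            printf("remap %p, flush flags %#lx\n", addr, flags);
    }

    /* New, general helper: takes an explicit flush flag. */
    static void set_prot_flags(void *addr, unsigned long flags)
    {
            update_mapping(addr, flags);
    }

    /* Old entry point: unchanged signature, old behaviour preserved. */
    static void set_prot(void *addr)
    {
            set_prot_flags(addr, UVMF_NONE);
    }

    int main(void)
    {
            char page[4096];

            set_prot(page);                     /* legacy call sites */
            set_prot_flags(page, UVMF_INVLPG);  /* pages about to be reused */
            return 0;
    }

Old callers compile unchanged, and only the sites that need the stronger guarantee, like the two check_pt_base() conversions above, pay for the extra flush.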