author     Olof Johansson <olof@lixom.net>    2013-01-14 10:20:02 -0800
committer  Olof Johansson <olof@lixom.net>    2013-01-14 10:20:02 -0800
commit     8d84981e395850aab31c3f2ca7e2738e03f671d7 (patch)
tree       933425fddb23d28be802277471df3fe3f6c2711d /include/asm-generic
parent     00c82d64405631967dca3890a9ce80ab35d04cc7 (diff)
parent     77cc982f6a3b33a5aa058ad3b20cda8866db2948 (diff)
download   linux-8d84981e395850aab31c3f2ca7e2738e03f671d7.tar.bz2
Merge branch 'clocksource/cleanup' into next/cleanup
Clockevent cleanup series from Shawn Guo.

Resolved move/change conflict in mach-pxa/time.c due to the sys_timer cleanup.

* clocksource/cleanup:
  clocksource: use clockevents_config_and_register() where possible
  ARM: use clockevents_config_and_register() where possible
  clockevents: export clockevents_config_and_register for module use
  + sync to Linux 3.8-rc3

Signed-off-by: Olof Johansson <olof@lixom.net>

Conflicts:
	arch/arm/mach-pxa/time.c
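For reference, the conversions in this series replace open-coded mult/shift and min/max delta setup followed by clockevents_register_device() with a single clockevents_config_and_register() call. Below is a minimal sketch of that pattern; the foo_* names, stub callbacks and the rate parameter are illustrative only, not taken from any driver touched by this series.

#include <linux/clockchips.h>
#include <linux/init.h>

/* Hypothetical clock event device; the callbacks are stubs for illustration. */
static int foo_set_next_event(unsigned long cycles, struct clock_event_device *evt)
{
	/* program the hardware comparator 'cycles' ticks into the future */
	return 0;
}

static void foo_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
{
	/* switch the timer between one-shot, periodic and shutdown modes */
}

static struct clock_event_device foo_clockevent = {
	.name		= "foo_timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= foo_set_next_event,
	.set_mode	= foo_set_mode,
};

static void __init foo_timer_init(unsigned long rate)
{
	/*
	 * Drivers used to fill in .mult/.shift and .min_delta_ns/.max_delta_ns
	 * by hand before calling clockevents_register_device().  The helper
	 * derives those values from the timer frequency and the minimum and
	 * maximum number of hardware cycles per event, then registers the
	 * device in one step.
	 */
	clockevents_config_and_register(&foo_clockevent, rate, 0xf, 0xffffffff);
}

Exporting the symbol (the third patch in the list above) is what allows modular drivers to use the same helper.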
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/parport.h  4
-rw-r--r--  include/asm-generic/tlb.h      9
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
index 40528cb977e8..2c9f9d4336ca 100644
--- a/include/asm-generic/parport.h
+++ b/include/asm-generic/parport.h
@@ -10,8 +10,8 @@
  * to devices on the PCI bus.
  */
-static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
 {
 #ifdef CONFIG_ISA
 	return parport_pc_find_isa_ports(autoirq, autodma);
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index ed6642ad03e0..25f01d0bc149 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -78,6 +78,14 @@ struct mmu_gather_batch {
 #define MAX_GATHER_BATCH	\
 	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+/*
+ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
+
 /* struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
  */
@@ -96,6 +104,7 @@ struct mmu_gather {
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch local;
 	struct page *__pages[MMU_GATHER_BUNDLE];
+	unsigned int batch_count;
 };
 #define HAVE_GENERIC_MMU_GATHER
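The batch_count field added above is what lets the page-zapping path enforce MAX_GATHER_BATCH_COUNT: the batch allocator refuses to grow the chain once the cap is reached, so the caller has to flush before freeing more pages. The consumer lives in mm/memory.c rather than in this header; the following is a simplified sketch of how tlb_next_batch() can apply the cap, not the literal kernel code.

/*
 * Sketch only: assumes the mmu_gather/mmu_gather_batch layout from this
 * header and __get_free_pages() from <linux/gfp.h>.
 */
static int tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch = tlb->active;

	/* Reuse a batch that was already chained on by an earlier round. */
	if (batch->next) {
		tlb->active = batch->next;
		return 1;
	}

	/* Cap reached: stop gathering so the caller flushes first. */
	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return 0;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return 0;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return 1;
}

With batch_count starting at zero when the mmu_gather is set up, each gather can chain at most MAX_GATHER_BATCH_COUNT extra batches, which corresponds to roughly the 10K pages mentioned in the comment, before it is forced to flush.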