Diffstat (limited to 'arch/s390')

 arch/s390/include/asm/pgtable.h | 21 ++++++++++++++-------
 arch/s390/kernel/module.c       | 11 +++++++++++
 arch/s390/kernel/setup.c        | 13 +++++++++++--
 arch/s390/mm/dump_pagetables.c  | 13 ++++++++++---
 4 files changed, 46 insertions(+), 12 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 75b91bb772bd..dd647c919a66 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -119,13 +119,12 @@ static inline int is_zero_pfn(unsigned long pfn)
#ifndef __ASSEMBLY__
/*
- * The vmalloc area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
- * which should be enough for any sane case.
- * By putting vmalloc at the top, we maximise the gap between physical
- * memory and vmalloc to catch misplaced memory accesses. As a side
- * effect, this also makes sure that 64 bit module code cannot be used
- * as system call address.
+ * The vmalloc and module areas will always be in the topmost part of the
+ * kernel mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and
+ * modules. On 64 bit kernels there is a 2GB area at the top of the vmalloc
+ * area where modules will reside. That makes sure that inter-module
+ * branches always happen without trampolines and, in addition, that the
+ * placement within a 2GB frame is branch prediction unit friendly.
*/
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
@@ -133,6 +132,14 @@ extern struct page *vmemmap;
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
+#ifdef CONFIG_64BIT
+extern unsigned long MODULES_VADDR;
+extern unsigned long MODULES_END;
+#define MODULES_VADDR MODULES_VADDR
+#define MODULES_END MODULES_END
+#define MODULES_LEN (1UL << 31)
+#endif
+
/*
* A 31 bit pagetable entry of S390 has following format:
* | PFRA | | OS |
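
A note on the MODULES_VADDR and MODULES_END defines above: a macro whose body
is the identically named variable looks redundant, but it lets generic kernel
code test "#ifdef MODULES_VADDR" at preprocessing time while every use of the
name still resolves to the boot-time-initialized variable, because an
object-like macro is never expanded recursively. A minimal user-space sketch
of the idiom; the value assigned is illustrative only, not taken from the
patch:

#include <stdio.h>

/* Runtime-set boundary, standing in for the extern in pgtable.h. */
unsigned long MODULES_VADDR;

/* Self-referential define as in the patch: "#ifdef MODULES_VADDR" is now
 * true, yet the name keeps referring to the variable above. */
#define MODULES_VADDR MODULES_VADDR

int main(void)
{
	MODULES_VADDR = 0x3ff80000000UL;	/* illustrative: 4TB - 2GB */
#ifdef MODULES_VADDR
	printf("module area starts at %#lx\n", MODULES_VADDR);
#endif
	return 0;
}

MODULES_LEN, 1UL << 31, is the 2GB frame the rewritten comment refers to.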
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 46412b1d7e1e..4610deafd953 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -44,6 +44,17 @@
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */
+#ifdef CONFIG_64BIT
+void *module_alloc(unsigned long size)
+{
+	if (PAGE_ALIGN(size) > MODULES_LEN)
+		return NULL;
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				    GFP_KERNEL, PAGE_KERNEL, -1,
+				    __builtin_return_address(0));
+}
+#endif
+
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
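
module_alloc() is the hook the generic module loader calls to obtain memory
for module text and data; the version added above confines every allocation
to the MODULES_VADDR..MODULES_END window via __vmalloc_node_range(), after
rejecting requests that cannot fit at all. A user-space sketch of just that
size check, with PAGE_ALIGN re-created locally; fits_module_area() is a
hypothetical name used for illustration, not a kernel function:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MODULES_LEN	(1UL << 31)	/* the patch's 2GB module window */

/* Mirrors the check at the top of the patched module_alloc(): requests
 * that cannot fit into the 2GB window fail before any allocation. */
static int fits_module_area(unsigned long size)
{
	return PAGE_ALIGN(size) <= MODULES_LEN;
}

int main(void)
{
	printf("8KB module fits: %d\n", fits_module_area(8192));
	printf("3GB module fits: %d\n", fits_module_area(3UL << 30));
	return 0;
}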
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfb48f18169c..b1f2be9aaaad 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -105,6 +105,11 @@ EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
+#ifdef CONFIG_64BIT
+unsigned long MODULES_VADDR;
+unsigned long MODULES_END;
+#endif
+
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
@@ -544,19 +549,23 @@ static void __init setup_memory_end(void)
	/* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
-	vmalloc_size = VMALLOC_END ?: 128UL << 30;
+	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
	if (tmp <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
+	/* module area is at the end of the kernel address space. */
+	MODULES_END = vmax;
+	MODULES_VADDR = MODULES_END - MODULES_LEN;
+	VMALLOC_END = MODULES_VADDR;
#else
	vmalloc_size = VMALLOC_END ?: 96UL << 20;
	vmax = 1UL << 31;	/* 2-level kernel page table */
-#endif
	/* vmalloc area is at the end of the kernel address space. */
	VMALLOC_END = vmax;
+#endif
	VMALLOC_START = vmax - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
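
The 64-bit branch above carves the layout from the top of the kernel address
space downward: the 2GB module area ends at vmax, the vmalloc area ends where
the module area begins, and VMALLOC_START follows from the reserved size. A
small user-space sketch that replays the arithmetic exactly as the hunk shows
it, under an assumed 16GB memory size and a stand-in struct page:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define MODULES_LEN	(1UL << 31)

struct page { unsigned long flags; void *lru[2]; };	/* size stand-in */

int main(void)
{
	unsigned long real_memory_size = 16UL << 30;	/* assumption */
	unsigned long vmalloc_size, tmp, vmax;

	vmalloc_size = (128UL << 30) - MODULES_LEN;
	tmp = real_memory_size / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
	vmax = (tmp <= (1UL << 42)) ? (1UL << 42)	/* 3-level */
				    : (1UL << 53);	/* 4-level */

	printf("MODULES:  %#lx - %#lx\n", vmax - MODULES_LEN, vmax);
	printf("VMALLOC:  %#lx - %#lx\n", vmax - vmalloc_size,
	       vmax - MODULES_LEN);
	return 0;
}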
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index cd1c62d160ed..cbc6668acb85 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,6 +18,9 @@ enum address_markers_idx {
	KERNEL_END_NR,
	VMEMMAP_NR,
	VMALLOC_NR,
+#ifdef CONFIG_64BIT
+	MODULES_NR,
+#endif
};
static struct addr_marker address_markers[] = {
@@ -26,6 +29,9 @@ static struct addr_marker address_markers[] = {
	[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
	[VMEMMAP_NR] = {0, "vmemmap Area"},
	[VMALLOC_NR] = {0, "vmalloc Area"},
+#ifdef CONFIG_64BIT
+	[MODULES_NR] = {0, "Modules Area"},
+#endif
	{ -1, NULL }
};
@@ -205,11 +211,12 @@ static int pt_dump_init(void)
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_32BIT
+	max_addr = 1UL << 31;
+#else
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
-#else
-	max_addr = 1UL << 31;
+	address_markers[MODULES_NR].start_address = MODULES_VADDR;
#endif
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
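
With the new marker wired up, the dumper announces the module area boundary
just as it does for the vmemmap and vmalloc areas. A sketch of how such a
sorted marker table maps to output; the addresses are illustrative 3-level
values and the "---[ ... ]---" format is an assumption about the dumper's
output, not quoted from it:

#include <stdio.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

/* Illustrative boot-computed values for vmax = 1UL << 42. */
static struct addr_marker address_markers[] = {
	{ 0, "Identity Mapping" },
	{ (1UL << 42) - ((128UL << 30) - (1UL << 31)), "vmalloc Area" },
	{ (1UL << 42) - (1UL << 31), "Modules Area" },
	{ -1UL, NULL }
};

int main(void)
{
	struct addr_marker *m;

	/* Print each boundary the way the walker would announce it when
	 * it crosses a marker's start_address. */
	for (m = address_markers; m->name; m++)
		printf("---[ %s ]---  %#lx\n", m->name, m->start_address);
	return 0;
}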