author		Christophe Leroy <christophe.leroy@csgroup.eu>	2022-04-09 19:17:35 +0200
committer	Michael Ellerman <mpe@ellerman.id.au>	2022-05-05 22:11:58 +1000
commit		5cf7f9a0a54e93a6d3361de5f4ba4358b054c6c2
tree		91261076edc16a02ab4f3b99008bc3ac1cc0d20f /arch/powerpc/mm
parent		ab57bd7570d4393beb5a91bf092ed54e9c3574a2
powerpc/mm: Enable full randomisation of memory mappings
Do like most other architectures and provide randomisation also to "legacy" memory mappings, by adding the random factor to mm->mmap_base in arch_pick_mmap_layout(). See commit 8b8addf891de ("x86/mm/32: Enable full randomization on i386 and X86_32") for the full rationale and benefits of that mmap randomisation.

At the moment, slice_find_area_bottomup() doesn't use mm->mmap_base but the fixed TASK_UNMAPPED_BASE instead. Since slice_find_area_bottomup() is used as a fallback to slice_find_area_topdown(), it can't use mm->mmap_base directly. Instead of always using TASK_UNMAPPED_BASE as the base address, leave the choice to the caller: when called from slice_find_area_topdown(), TASK_UNMAPPED_BASE is used; otherwise mm->mmap_base is used.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/417fb10dde828534c73a03138b49621d74f4e5be.1649523076.git.christophe.leroy@csgroup.eu
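For illustration, arch_pick_mmap_layout() reads roughly as below once the patch is applied. Only the legacy mmap_base assignment is part of this diff; the surrounding lines are paraphrased from the usual powerpc implementation in arch/powerpc/mm/mmap.c and should be read as an assumption, not as patch content:

/* Sketch of arch_pick_mmap_layout() after this patch (context paraphrased). */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	/* Randomise only when the task has PF_RANDOMIZE set. */
	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		/* New with this patch: the legacy bottom-up base is randomised too. */
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}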
Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/book3s64/slice.c | 18 +++++++-----------
 arch/powerpc/mm/mmap.c           |  2 +-
 2 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index 03681042b807..c0b58afb9a47 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -276,20 +276,18 @@ static bool slice_scan_available(unsigned long addr,
}
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
- unsigned long len,
+ unsigned long addr, unsigned long len,
const struct slice_mask *available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, next_end;
+ unsigned long found, next_end;
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
-
- addr = TASK_UNMAPPED_BASE;
/*
* Check till the allowed max value for this mmap request
*/
@@ -322,12 +320,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
- unsigned long len,
+ unsigned long addr, unsigned long len,
const struct slice_mask *available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, prev;
+ unsigned long found, prev;
struct vm_unmapped_area_info info;
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
@@ -335,8 +333,6 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
-
- addr = mm->mmap_base;
/*
* If we are trying to allocate above DEFAULT_MAP_WINDOW
* Add the difference to the mmap_base.
@@ -377,7 +373,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- return slice_find_area_bottomup(mm, len, available, psize, high_limit);
+ return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}
@@ -386,9 +382,9 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
int topdown, unsigned long high_limit)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize, high_limit);
+ return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
else
- return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
+ return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}
static inline void slice_copy_mask(struct slice_mask *dst,
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 5972d619d274..d9eae456558a 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -96,7 +96,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
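For context, the random_factor added above comes from arch_mmap_rnd(), which is not shown in this hunk. The sketch below is paraphrased from the typical powerpc implementation and is an assumption for illustration, not part of this patch:

/* Sketch (assumed, not part of this diff): how powerpc derives random_factor. */
unsigned long arch_mmap_rnd(void)
{
	unsigned long shift, rnd;

	shift = mmap_rnd_bits;
#ifdef CONFIG_COMPAT
	/* 32-bit tasks get a smaller randomisation window. */
	if (is_32bit_task())
		shift = mmap_rnd_compat_bits;
#endif

	/* Random page-aligned offset of up to 2^shift pages. */
	rnd = get_random_long() % (1ul << shift);

	return rnd << PAGE_SHIFT;
}

With the random factor applied in both branches, every exec gets a different mmap base, so "legacy" bottom-up layouts no longer start at a fixed, predictable address.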