author     David Gibson <david@gibson.dropbear.id.au>  2005-11-24 14:16:15 +1100
committer  Paul Mackerras <paulus@samba.org>           2006-01-09 14:50:28 +1100
commit     456752f7505ef8f580ffd157558e661da2767d99 (patch)
tree       8854e9dd2d21ac440f691dd16881e596db61896b /arch
parent     706e6b2caf285d3eb056c2847b7c53ae823e8a87 (diff)
download   linux-456752f7505ef8f580ffd157558e661da2767d99.tar.bz2
[PATCH] powerpc: Make hugepage mappings respect hint addresses
Currently, the powerpc version of hugetlb_get_unmapped_area() entirely
ignores the hint address. The only way to get a hugepage mapping at a
specified address is with MAP_FIXED, in which case there's no way
(short of parsing /proc/self/maps) for userspace to tell if it will
clobber an existing mapping. This is inconvenient, so the patch below
makes hugepage mappings use the given hint address if possible.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
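
For context, a minimal userspace sketch of the behaviour this patch enables (the hugetlbfs mount point, file name, hint address and 16M hugepage size below are illustrative assumptions, not part of the patch): before this change, a plain hint passed to mmap() for a hugetlbfs mapping was ignored, so only MAP_FIXED could place the mapping at a chosen address; with it, the kernel tries to honour the hint when the requested range is free.

/*
 * Hypothetical check of the new behaviour: map a hugetlbfs file at a
 * hint address *without* MAP_FIXED and see whether the kernel honoured
 * the hint.  Path, hint value and hugepage size are assumptions.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define HUGEPAGE_SIZE (16UL * 1024 * 1024)      /* 16M hugepages assumed */
#define HINT_ADDR     ((void *)0x200000000UL)   /* arbitrary hint above 4GB */

int main(void)
{
        int fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0600);
        if (fd < 0) {
                perror("open hugetlbfs file");
                return 1;
        }

        /* No MAP_FIXED: the kernel may place the mapping elsewhere
         * rather than clobber an existing mapping at HINT_ADDR. */
        void *p = mmap(HINT_ADDR, HUGEPAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("asked for %p, got %p (%s)\n", HINT_ADDR, p,
               p == HINT_ADDR ? "hint honoured" : "hint ignored or moved");

        munmap(p, HUGEPAGE_SIZE);
        close(fd);
        return 0;
}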
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 43
1 file changed, 39 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 54131b877da3..f6fe3eaf87a3 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -549,6 +549,17 @@ fail:
 	return addr;
 }
 
+static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(current->mm, addr);
+	if (!vma || ((addr + len) <= vma->vm_start))
+		return 0;
+
+	return -ENOMEM;
+}
+
 static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 {
 	unsigned long addr = 0;
@@ -609,6 +620,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 {
 	int lastshift;
 	u16 areamask, curareas;
+	struct vm_area_struct *vma;
 
 	if (HPAGE_SHIFT == 0)
 		return -EINVAL;
@@ -618,15 +630,28 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -EINVAL;
 
+	/* Paranoia, caller should have dealt with this */
+	BUG_ON((addr + len) < addr);
+
 	if (test_thread_flag(TIF_32BIT)) {
+		/* Paranoia, caller should have dealt with this */
+		BUG_ON((addr + len) > 0x100000000UL);
+
 		curareas = current->mm->context.low_htlb_areas;
 
-		/* First see if we can do the mapping in the existing
-		 * low areas */
+		/* First see if we can use the hint address */
+		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
+			areamask = LOW_ESID_MASK(addr, len);
+			if (open_low_hpage_areas(current->mm, areamask) == 0)
+				return addr;
+		}
+
+		/* Next see if we can map in the existing low areas */
 		addr = htlb_get_low_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
+		/* Finally go looking for areas to open */
 		lastshift = 0;
 		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
 		     ! lastshift; areamask >>=1) {
@@ -641,12 +666,22 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	} else {
 		curareas = current->mm->context.high_htlb_areas;
 
-		/* First see if we can do the mapping in the existing
-		 * high areas */
+		/* First see if we can use the hint address */
+		/* We discourage 64-bit processes from doing hugepage
+		 * mappings below 4GB (must use MAP_FIXED) */
+		if ((addr >= 0x100000000UL)
+		    && (htlb_check_hinted_area(addr, len) == 0)) {
+			areamask = HTLB_AREA_MASK(addr, len);
+			if (open_high_hpage_areas(current->mm, areamask) == 0)
+				return addr;
+		}
+
+		/* Next see if we can map in the existing high areas */
 		addr = htlb_get_high_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
+		/* Finally go looking for areas to open */
 		lastshift = 0;
 		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
 		     ! lastshift; areamask >>=1) {