author     Nicholas Piggin <npiggin@gmail.com>      2018-03-07 11:37:17 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>    2018-03-13 23:43:08 +1100
commit     014a32b30e9d81b47ef82b9995b52c3a0c8b4082 (patch)
tree       95876b99824a29ce76b8741b56084fb33c0bbd05
parent     d262bd5a73998252d1cdf632bedaf1ca540839d8 (diff)
powerpc/mm/slice: remove radix calls to the slice code
This is a tidy up which removes radix MMU calls into the slice code.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h |  8
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c      |  6
-rw-r--r--  arch/powerpc/mm/slice.c            | 17
3 files changed, 12 insertions(+), 19 deletions(-)
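The caller-side guard the patch introduces is easiest to see in the hugetlb.h hunk below: instead of compiling two versions of is_hugepage_only_range() under #if defined(CONFIG_PPC_MM_SLICES), a single inline wrapper checks IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled() and only then calls into the slice code, so the slice code itself no longer needs any radix checks. What follows is a minimal, self-contained sketch of that guard pattern, not kernel code: the IS_ENABLED() macro here is a simplified stand-in for the kernel's version, and radix_enabled() and slice_is_hugepage_only_range() are hypothetical stubs for illustration.

/* guard_pattern.c - illustrative sketch only, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the Kconfig option and IS_ENABLED(). */
#define CONFIG_PPC_MM_SLICES 1
#define IS_ENABLED(option) (option)

/* Hypothetical stub for the MMU mode query. */
static bool radix_enabled(void)
{
	return false;	/* pretend we are running with the hash MMU */
}

/* Hypothetical stub for the slice-side helper. */
static int slice_is_hugepage_only_range(unsigned long addr, unsigned long len)
{
	/* The real helper consults the mm's slice masks; stubbed here. */
	printf("slice helper called for 0x%lx + 0x%lx\n", addr, len);
	return 0;
}

/* Mirrors the new single wrapper: the radix check lives at the caller. */
static inline int is_hugepage_only_range(unsigned long addr, unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
		return slice_is_hugepage_only_range(addr, len);
	return 0;
}

int main(void)
{
	/* Slices configured and radix disabled, so the slice helper runs. */
	return is_hugepage_only_range(0x10000000UL, 0x1000UL);
}

Because IS_ENABLED() evaluates to a compile-time constant, the call into the slice helper is dead-code eliminated when the option is not set, which is what lets the #else dummy definition and the #endif be dropped from the header in the hunk below.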
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 1a4847f67ea8..48f2ed2a71ae 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -89,17 +89,17 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
 
 void flush_dcache_icache_hugepage(struct page *page);
 
-#if defined(CONFIG_PPC_MM_SLICES)
-int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
-#else
+
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,
 					 unsigned long len)
 {
+	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
+		return slice_is_hugepage_only_range(mm, addr, len);
 	return 0;
 }
-#endif
 
 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 			    pte_t pte);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 590be3fa0ce2..f4153f21d214 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -565,10 +565,12 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
 #ifdef CONFIG_PPC_MM_SLICES
-	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
 	/* With radix we don't use slice, so derive it from vma*/
-	if (!radix_enabled())
+	if (!radix_enabled()) {
+		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
+
 		return 1UL << mmu_psize_to_shift(psize);
+	}
 #endif
 	if (!is_vm_hugetlb_page(vma))
 		return PAGE_SIZE;
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index b3b465c37224..1297b3ad7dd2 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -686,16 +686,8 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
 	unsigned char *psizes;
 	int index, mask_index;
 
-	/*
-	 * Radix doesn't use slice, but can get enabled along with MMU_SLICE
-	 */
-	if (radix_enabled()) {
-#ifdef CONFIG_PPC_64K_PAGES
-		return MMU_PAGE_64K;
-#else
-		return MMU_PAGE_4K;
-#endif
-	}
+	VM_BUG_ON(radix_enabled());
+
 	if (addr < SLICE_LOW_TOP) {
 		psizes = mm->context.low_slices_psize;
 		index = GET_LOW_SLICE_INDEX(addr);
@@ -778,14 +770,13 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
  * for now as we only use slices with hugetlbfs enabled. This should
  * be fixed as the generic code gets fixed.
  */
-int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len)
 {
 	const struct slice_mask *maskp;
 	unsigned int psize = mm->context.user_psize;
 
-	if (radix_enabled())
-		return 0;
+	VM_BUG_ON(radix_enabled());
 
 	maskp = slice_mask_for_size(mm, psize);
 #ifdef CONFIG_PPC_64K_PAGES