-rw-r--r--	arch/powerpc/include/asm/page_64.h	11
-rw-r--r--	arch/powerpc/mm/slice.c	10
2 files changed, 9 insertions, 12 deletions
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index bd55ff751938..c4d9654bd637 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -99,17 +99,6 @@ extern u64 ppc64_pft_size;
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
 #ifndef __ASSEMBLY__
-/*
- * One bit per slice. We have lower slices which cover 256MB segments
- * upto 4G range. That gets us 16 low slices. For the rest we track slices
- * in 1TB size.
- * 64 below is actually SLICE_NUM_HIGH to fixup complie errros
- */
-struct slice_mask {
-	u16 low_slices;
-	DECLARE_BITMAP(high_slices, 64);
-};
-
 struct mm_struct;
 
 extern unsigned long slice_get_unmapped_area(unsigned long addr,
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 175e0dbce89b..7599c2703520 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -37,7 +37,15 @@
 #include <asm/hugetlb.h>
 
 static DEFINE_SPINLOCK(slice_convert_lock);
-
+/*
+ * One bit per slice. We have lower slices which cover 256MB segments
+ * upto 4G range. That gets us 16 low slices. For the rest we track slices
+ * in 1TB size.
+ */
+struct slice_mask {
+	u64 low_slices;
+	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
+};
 
 #ifdef DEBUG
 int _slice_debug = 1;
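
For context, a small userspace sketch of the bookkeeping the comment in the patch describes: one bit per slice, with 16 low slices of 256MB covering the first 4GB and high slices tracked in 1TB units above that. The shift values, the high-slice count, and the slice_mark() helper are illustrative assumptions, not the kernel's code; the kernel stores the high bits with DECLARE_BITMAP() and the real SLICE_* constants, and manipulates them through the slice.c helpers.

#include <stdint.h>
#include <stdio.h>

#define SLICE_LOW_SHIFT		28	/* 256MB low slices (assumed value) */
#define SLICE_HIGH_SHIFT	40	/* 1TB high slices (assumed value) */
#define SLICE_NUM_HIGH		64	/* assumed; matches the old hard-coded bitmap size */

/* Userspace stand-in for the kernel struct: plain arrays, no DECLARE_BITMAP(). */
struct slice_mask {
	uint64_t low_slices;
	uint64_t high_slices[(SLICE_NUM_HIGH + 63) / 64];
};

/* Mark the slice containing 'addr': 256MB slices below 4GB, 1TB slices above. */
static void slice_mark(struct slice_mask *mask, uint64_t addr)
{
	if (addr < (1ULL << 32)) {
		unsigned int i = addr >> SLICE_LOW_SHIFT;	/* 256MB granule */
		mask->low_slices |= 1ULL << i;
	} else {
		unsigned int i = addr >> SLICE_HIGH_SHIFT;	/* 1TB granule */
		mask->high_slices[i / 64] |= 1ULL << (i % 64);
	}
}

int main(void)
{
	struct slice_mask mask = { 0 };

	slice_mark(&mask, 0x10000000ULL);	/* 256MB -> low slice 1  */
	slice_mark(&mask, 0x20000000000ULL);	/* 2TB   -> high slice 2 */

	printf("low_slices=%#llx high_slices[0]=%#llx\n",
	       (unsigned long long)mask.low_slices,
	       (unsigned long long)mask.high_slices[0]);
	return 0;
}

With the patch applied, the struct itself becomes private to arch/powerpc/mm/slice.c, low_slices widens from u16 to u64, and the bitmap size is expressed as SLICE_NUM_HIGH instead of the hard-coded 64.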