Diffstat (limited to 'arch/powerpc/include/asm/book3s/64')
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/mmu-hash.h | 1 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/pgalloc.h | 28 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/radix.h | 15 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 3 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/tlbflush.h | 14 |
5 files changed, 53 insertions, 8 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 290157e8d5b2..74839f24f412 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT	12
 #define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
 #define HPTE_R_G		ASM_CONST(0x0000000000000008)
 #define HPTE_R_M		ASM_CONST(0x0000000000000010)
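The new HPTE_R_PPP mask groups all three page-protection bits of the second HPTE doubleword: the top bit (pp0) plus the two low pp bits already covered by HPTE_R_PP. The userspace sketch below is illustration only, assuming that grouping; hpte_set_pp() is a hypothetical helper, not a kernel API.

#include <stdio.h>

#define HPTE_R_PP	0x0000000000000003UL	/* low pp bits only, for comparison */
#define HPTE_R_PPP	0x8000000000000003UL	/* pp0 plus the two low pp bits */

/* hypothetical helper: replace the protection bits, leave everything else alone */
static unsigned long hpte_set_pp(unsigned long hpte_r, unsigned long newpp)
{
	hpte_r &= ~HPTE_R_PPP;			/* clear pp0 and the low pp bits */
	return hpte_r | (newpp & HPTE_R_PPP);	/* install the new value */
}

int main(void)
{
	unsigned long r = 0x8000000000045003UL;	/* pp0 set, pp = 3, some other bits */

	printf("before: 0x%016lx\n", r);
	printf("after:  0x%016lx\n", hpte_set_pp(r, 0x2UL));
	return 0;
}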
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 488279edb1f0..cd5e7aa8cc34 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
 			pgtable_cache[(shift) - 1];	\
 		})
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 	return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP, 4);
+	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long address)
 {
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long address)
 {
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
         return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	tlb_flush_pgtable(tlb, address);
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, table, 0);
 }
 
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 937d4e247ac3..df294224e280 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 				 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+	unsigned long rts_field;
+	/*
+	 * we support 52 bits, hence 52-31 = 21, 0b10101
+	 * RTS encoding details
+	 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+	 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+	 */
+	rts_field = (0x5UL << 5); /* 6 - 8 bits */
+	rts_field |= (0x2UL << 61);
+
+	return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif
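radix__get_tree_size() hard-codes the RTS (radix tree size) encoding for a 52-bit address space: 52 - 31 = 21 = 0b10101, with the low three bits of that value supplied by the << 5 shift and the top two bits by the << 61 shift. The standalone sketch below only cross-checks that arithmetic; rts_encode() is a hypothetical helper written for this illustration, not kernel code.

#include <stdio.h>

/* hypothetical helper: build the RTS field for a va_bits-sized radix tree,
 * splitting rts = va_bits - 31 the same way radix__get_tree_size() does */
static unsigned long rts_encode(unsigned int va_bits)
{
	unsigned long rts = va_bits - 31;	/* 52 bits -> 21 = 0b10101 */

	return ((rts & 0x7UL) << 5) |		/* low 3 bits of rts  */
	       ((rts >> 3) << 61);		/* high 2 bits of rts */
}

int main(void)
{
	/* expected: (0x5UL << 5) | (0x2UL << 61), as in the patch */
	printf("0x%016lx\n", rts_encode(52));
	return 0;
}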
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 13ef38828dfe..3fa94fcac628 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 				    unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 			      unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)	radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)	radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d98424ae356c..96e5769b18b0 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	/*
+	 * Flush the page table walk cache on freeing a page table. We already
+	 * have marked the upper/higher level page table entry none by now.
+	 * So it is safe to flush PWC here.
+	 */
+	if (!radix_enabled())
+		return;
+	radix__flush_tlb_pwc(tlb, address);
+}
 
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
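Taken together, the last two headers give the page-table free paths in pgalloc.h a flush_tlb_pgtable() hook that does nothing on hash and flushes the page walk cache on radix, with the !CONFIG_SMP build mapping radix__flush_tlb_pwc() onto the local variant. The userspace mock below sketches that dispatch shape only; every function in it is a stand-in, and radix_enabled() is stubbed to return true.

#include <stdbool.h>
#include <stdio.h>

struct mmu_gather { int unused; };

/* stand-in: pretend the radix MMU is active */
static bool radix_enabled(void) { return true; }

static void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	(void)tlb;
	printf("flush page walk cache for table at 0x%lx\n", addr);
}

/* UP fallback, mirroring the !CONFIG_SMP branch of tlbflush-radix.h */
#define radix__flush_tlb_pwc(tlb, addr)	radix__local_flush_tlb_pwc(tlb, addr)

/* same shape as the new helper in tlbflush.h */
static void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	if (!radix_enabled())
		return;		/* nothing to do unless radix is enabled */
	radix__flush_tlb_pwc(tlb, address);
}

int main(void)
{
	struct mmu_gather tlb = { 0 };

	/* what __pte_free_tlb()/__pmd_free_tlb()/__pud_free_tlb() now do
	 * before handing the table to pgtable_free_tlb() */
	flush_tlb_pgtable(&tlb, 0x7fff00000000UL);
	return 0;
}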