author		Chris Metcalf <cmetcalf@tilera.com>	2012-03-29 13:58:43 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-05-25 12:48:24 -0400
commit		d5d14ed6f2db7287a5088e1350cf422bf72140b3 (patch)
tree		19f0bc20bb6f1995a1e4f75dc58e388c047f7d23 /arch/tile/mm
parent		47d632f9f8f3ed62b21f725e98b726d65769b6d7 (diff)
download	linux-d5d14ed6f2db7287a5088e1350cf422bf72140b3.tar.bz2
arch/tile: Allow tilegx to build with either 16K or 64K page size
This change introduces new flags for the hv_install_context()
API, which passes a page table pointer to the hypervisor. Clients
can explicitly request 4K, 16K, or 64K small pages when they
install a new context. In practice the page size is fixed at
kernel compile time, so the same size is requested every time
a new page table is installed.
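A minimal sketch of how a client might bind one of these flags at
compile time; the HV_CTX_PG_SM_* flag names, CTX_PAGE_FLAG, and the
wrapper function are assumptions for illustration (hv_install_context()
and HV_CTX_DIRECTIO come from the existing hypervisor ABI):

/* Sketch, assuming flag names HV_CTX_PG_SM_16K/HV_CTX_PG_SM_64K:
 * pick the small-page-size flag once, from the compile-time PAGE_SIZE,
 * so every context install requests the same size. */
#if PAGE_SIZE == (64 * 1024)
# define CTX_PAGE_FLAG	HV_CTX_PG_SM_64K
#elif PAGE_SIZE == (16 * 1024)
# define CTX_PAGE_FLAG	HV_CTX_PG_SM_16K
#else
# define CTX_PAGE_FLAG	0	/* fall back to the 4K default */
#endif

static inline void install_page_table(pgd_t *pgdir, int asid, HV_PTE access)
{
	int rc = hv_install_context(__pa(pgdir), access, asid,
				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}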
The <hv/hypervisor.h> header now provides more abstract macros
for managing "page" things like PFNs and page tables. For example,
there is now a HV_DEFAULT_PAGE_SIZE_SMALL instead of the old
HV_PAGE_SIZE_SMALL. The various PFN routines have been eliminated and
only PA- or PTFN-based ones remain (since PTFNs are always expressed
in a fixed 2KB "page" size). The page-table management macros are
renamed with a leading underscore and take page-size arguments, with
the presumption that clients will use those macros in some single
place to provide the "real" macros they will use themselves.
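The wrapping pattern that describes looks roughly like this;
_HV_L2_ENTRIES and its shift arguments are assumed names for
illustration, not quoted from the header:

/* In <hv/hypervisor.h> (sketch): a size-parameterized helper with a
 * leading underscore.  An L2 table holds one PTE per small page
 * within the span of a large page. */
#define _HV_L2_ENTRIES(big_shift, small_shift) \
	(1 << ((big_shift) - (small_shift)))

/* In the client (sketch): bind the compile-time page sizes in one
 * place to produce the "real" macro used everywhere else. */
#define HV_L2_ENTRIES	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)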
I happened to notice the old hv_set_caching() API was totally broken
(it assumed 4KB pages), so I changed it to nominally work correctly
with other page sizes.
Tag modules with the page size so you can't load a module built with
a conflicting page size. (And add a test for SMP while we're at it.)
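A plausible shape for that tagging, via the per-architecture vermagic
hook that the module loader already compares at load time (the
MODULE_PGSZ macro and the exact strings are assumptions for
illustration):

/* Sketch: fold page size and SMP-ness into the per-arch vermagic so
 * a module built with a conflicting configuration is rejected. */
#if PAGE_SIZE == (64 * 1024)
# define MODULE_PGSZ	" 64K"
#elif PAGE_SIZE == (16 * 1024)
# define MODULE_PGSZ	" 16K"
#else
# define MODULE_PGSZ	""
#endif

#ifdef CONFIG_SMP
# define MODULE_VERMAGIC_SMP	""
#else
# define MODULE_VERMAGIC_SMP	" nosmp"
#endif

#define MODULE_ARCH_VERMAGIC	"tilegx" MODULE_PGSZ MODULE_VERMAGIC_SMP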
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/mm')
-rw-r--r--	arch/tile/mm/init.c	11
-rw-r--r--	arch/tile/mm/pgtable.c	27
2 files changed, 15 insertions, 23 deletions
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 1e4633520b35..c04fbfd93fc5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
 
 static void init_prealloc_ptes(int node, int pages)
 {
-	BUG_ON(pages & (HV_L2_ENTRIES-1));
+	BUG_ON(pages & (PTRS_PER_PTE - 1));
 	if (pages) {
 		num_l2_ptes[node] = pages;
 		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
 
 #ifdef __tilegx__
 
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
 static inline pmd_t *alloc_pmd(void)
 {
-	return (pmd_t *)alloc_pte();
+	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
 }
 
 static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -811,7 +806,7 @@ void __init paging_init(void)
 	 * changing init_mm once we get up and running, and there's no
 	 * need for e.g. vmalloc_sync_all().
 	 */
-	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
 	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
 	assign_pmd(pud, alloc_pmd());
 #endif
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa899b3e..3d7074347e6d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -289,13 +289,12 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+			       int order)
 {
 	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
 	struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
 	int i;
-#endif
 
 #ifdef CONFIG_HIGHPTE
 	flags |= __GFP_HIGHMEM;
@@ -305,17 +304,15 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	if (p == NULL)
 		return NULL;
 
-#if L2_USER_PGTABLE_ORDER > 0
 	/*
 	 * Make every page have a page_count() of one, not just the first.
 	 * We don't use __GFP_COMP since it doesn't look like it works
 	 * correctly with tlb_remove_page().
 	 */
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		init_page_count(p+i);
 		inc_zone_page_state(p+i, NR_PAGETABLE);
 	}
-#endif
 
 	pgtable_page_ctor(p);
 	return p;
@@ -326,28 +323,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * process).  We have to correct whatever pte_alloc_one() did before
  * returning the pages to the allocator.
  */
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
 {
 	int i;
 
 	pgtable_page_dtor(p);
 	__free_page(p);
 
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		__free_page(p+i);
 		dec_zone_page_state(p+i, NR_PAGETABLE);
 	}
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-		    unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+			unsigned long address, int order)
 {
 	int i;
 
 	pgtable_page_dtor(pte);
 	tlb_remove_page(tlb, pte);
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		tlb_remove_page(tlb, pte + i);
 		dec_zone_page_state(pte + i, NR_PAGETABLE);
 	}
@@ -490,7 +487,7 @@ void set_pte(pte_t *ptep, pte_t pte)
 
 /* Can this mm load a PTE with cached_priority set? */
 static inline int mm_is_priority_cached(struct mm_struct *mm)
 {
-	return mm->context.priority_cached;
+	return mm->context.priority_cached != 0;
 }
 
 /*
@@ -500,8 +497,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
 void start_mm_caching(struct mm_struct *mm)
 {
 	if (!mm_is_priority_cached(mm)) {
-		mm->context.priority_cached = -1U;
-		hv_set_caching(-1U);
+		mm->context.priority_cached = -1UL;
+		hv_set_caching(-1UL);
 	}
 }
 
@@ -516,7 +513,7 @@ void start_mm_caching(struct mm_struct *mm)
  * Presumably we'll come back later and have more luck and clear
  * the value then; for now we'll just keep the cache marked for priority.
  */
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
 {
 	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
 		struct vm_area_struct *vm;
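This diffstat is limited to arch/tile/mm, so the renamed entry points
above imply thin wrappers elsewhere (presumably <asm/pgalloc.h>) that
keep the generic pte_* interface and forward a fixed order; a sketch
under that assumption:

/* Sketch (assumed location: arch/tile/include/asm/pgalloc.h). */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
}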