author      Paul Mundt <lethal@linux-sh.org>   2010-02-18 18:13:51 +0900
committer   Paul Mundt <lethal@linux-sh.org>   2010-02-18 18:13:51 +0900
commit      d01447b3197c2c470a14666be2c640407bbbfec7 (patch)
tree        06d1b83868e4d3971b781b45607b124718ee2ec0 /arch/sh/mm/pmb.c
parent      2e450643d70b62e0192577681b227d7d5d2efa45 (diff)
download    linux-d01447b3197c2c470a14666be2c640407bbbfec7.tar.bz2
sh: Merge legacy and dynamic PMB modes.
This implements a bit of rework for the PMB code, which permits us to
kill off the legacy PMB mode completely. Rather than trusting the boot
loader to do the right thing, we do a quick verification of the PMB
contents to determine whether to have the kernel set up the initial
mappings or whether it needs to mangle them later on instead.
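
To make the verification concrete: it boils down to scanning the hardware
PMB slots for entries the boot loader left valid and knocking out anything
that maps outside usable memory. Below is a minimal standalone sketch of
that idea only; pmb_read(), pmb_write(), ppn_in_range() and the field masks
are hypothetical stand-ins, not the kernel's actual accessors.

#include <stdbool.h>
#include <stdint.h>

#define NR_PMB_ENTRIES  16
#define PMB_V           0x00000100      /* entry-valid bit */
#define PMB_PFN_MASK    0xff000000      /* illustrative PPN field mask */

/* Hypothetical register accessors; data_word selects addr vs. data. */
extern uint32_t pmb_read(int idx, bool data_word);
extern void pmb_write(int idx, bool data_word, uint32_t val);
extern bool ppn_in_range(uint32_t ppn);

/* Count usable boot mappings, invalidating anything bogus. */
static int pmb_verify_boot_mappings(void)
{
        int i, found = 0;

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                uint32_t addr = pmb_read(i, false);     /* VPN word */
                uint32_t data = pmb_read(i, true);      /* PPN + flags */

                /* Slot was never populated by the boot loader. */
                if (!(addr & PMB_V) || !(data & PMB_V))
                        continue;

                /* Mapping points outside valid memory: blow it away. */
                if (!ppn_in_range(data & PMB_PFN_MASK)) {
                        pmb_write(i, false, addr & ~PMB_V);
                        pmb_write(i, true, data & ~PMB_V);
                        continue;
                }

                found++;
        }

        return found;   /* zero: kernel must set up its own mappings */
}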
If we're booting from legacy mappings, the kernel will now take control
of them and make them match the kernel's initial mapping configuration.
This is accomplished by breaking the initialization phase out into
multiple steps: synchronization, merging, and resizing. With the recent
rework, the synchronization code already establishes page links for
compound mappings, so we build on top of this for promoting mappings and
reclaiming unused slots.
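
The merge step then walks those links: it sums the sizes along a chain,
remembers the longest prefix whose total is a size the PMB can actually
encode, folds that prefix into the head entry, and reclaims the swallowed
slots. A simplified self-contained model of that walk (mirroring the
patch's pmb_merge(); the types and size table here are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct entry {
        unsigned long vpn, ppn, size;
        struct entry *link;     /* next piece of a compound mapping */
};

/* Hardware-expressible PMB page sizes (cf. pmb_sizes[]). */
static const unsigned long valid_sizes[] = {
        16UL << 20, 64UL << 20, 128UL << 20, 512UL << 20,
};

static bool size_valid(unsigned long size)
{
        size_t i;

        for (i = 0; i < sizeof(valid_sizes) / sizeof(valid_sizes[0]); i++)
                if (valid_sizes[i] == size)
                        return true;
        return false;
}

/* Returns how many links to unmap after folding them into 'head'. */
static int merge_chain(struct entry *head)
{
        unsigned long span = head->size, newsize = head->size;
        struct entry *tail;
        int depth = 0, i = 1;

        for (tail = head->link; tail; tail = tail->link, i++) {
                span += tail->size;
                if (size_valid(span)) {
                        newsize = span;         /* promote to this span */
                        depth = i;              /* links swallowed so far */
                }
        }

        if (!size_valid(newsize))
                return 0;       /* nothing expressible; leave chain as-is */

        head->size = newsize;
        return depth;           /* caller frees this many links */
}

For example, four linked 16MB entries fold into a single 64MB entry, while
a chain of two stops at 32MB, which the hardware cannot encode, and stays
split.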
At the same time, the changes introduced for the uncached helpers also
permit us to dynamically resize the uncached mapping without any
particular headaches. The smallest page size is more than sufficient for
mapping all of kernel text, and as we're careful not to jump to any
far-off locations in the setup code, the mapping can safely be resized
regardless of whether we are executing from it or not.
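
Concretely, the resize pass only needs to find the PMB entry backing the
uncached mapping, rewrite its size and size flags, and reprogram the
hardware slot while execution keeps flowing through it. A rough standalone
model of that step; size_to_flags(), rewrite_hw_entry(), uncached_resize(),
uncached_start and PMB_SZ_MASK stand in for the real kernel symbols:

#define SZ_16M          (16UL << 20)
#define PMB_SZ_MASK     0x00000070      /* illustrative size-field mask */

struct entry { unsigned long vpn, size, flags; };

extern unsigned long uncached_start;
extern unsigned long size_to_flags(unsigned long size);
extern void rewrite_hw_entry(struct entry *e);   /* cf. __set_pmb_entry() */
extern void uncached_resize(unsigned long size);

static void resize_uncached(struct entry *tbl, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                struct entry *e = &tbl[i];

                if (e->vpn != uncached_start)
                        continue;

                /* Shrink to the 16MB minimum; kernel text fits easily. */
                e->size = SZ_16M;
                e->flags &= ~PMB_SZ_MASK;
                e->flags |= size_to_flags(e->size);

                uncached_resize(e->size);       /* move uncached_end etc. */
                rewrite_hw_entry(e);            /* reprogram the slot */
                break;
        }
}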
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--   arch/sh/mm/pmb.c   243
1 file changed, 207 insertions(+), 36 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b9d5476e1284..198bcff5e96f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -52,7 +52,7 @@ struct pmb_entry {
         struct pmb_entry *link;
 };
 
-static void pmb_unmap_entry(struct pmb_entry *);
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
@@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
         pmbe = &pmb_entry_list[pos];
 
+        memset(pmbe, 0, sizeof(struct pmb_entry));
+
         spin_lock_init(&pmbe->lock);
 
         pmbe->vpn   = vpn;
         pmbe->ppn   = ppn;
         pmbe->flags = flags;
         pmbe->entry = pos;
-        pmbe->size  = 0;
 
         return pmbe;
@@ -133,7 +134,9 @@ out:
 static void pmb_free(struct pmb_entry *pmbe)
 {
         __clear_bit(pmbe->entry, pmb_map);
-        pmbe->entry = PMB_NO_ENTRY;
+
+        pmbe->entry = PMB_NO_ENTRY;
+        pmbe->link  = NULL;
 }
@@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void)
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-        pmbe->flags &= ~PMB_CACHE_MASK;
-        pmbe->flags |= pmb_cache_flags();
-
         writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
         writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
                         mk_pmb_data(pmbe->entry));
@@ -280,7 +280,7 @@ again:
         return wanted - size;
 
 out:
-        pmb_unmap_entry(pmbp);
+        pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
         return err;
 }
@@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr)
         read_unlock(&pmb_rwlock);
 
-        pmb_unmap_entry(pmbe);
+        pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void pmb_unmap_entry(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-        unsigned long flags;
+        return (b->vpn == (a->vpn + a->size)) &&
+               (b->ppn == (a->ppn + a->size)) &&
+               (b->flags == a->flags);
+}
 
-        if (unlikely(!pmbe))
-                return;
+static bool pmb_size_valid(unsigned long size)
+{
+        int i;
 
-        write_lock_irqsave(&pmb_rwlock, flags);
+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (pmb_sizes[i].size == size)
+                        return true;
+
+        return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (pmb_sizes[i].size == size)
+                        return pmb_sizes[i].flag;
+        return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
         do {
                 struct pmb_entry *pmblink = pmbe;
@@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
                 pmbe = pmblink->link;
                 pmb_free(pmblink);
-        } while (pmbe);
+        } while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+        unsigned long flags;
+
+        if (unlikely(!pmbe))
+                return;
+
+        write_lock_irqsave(&pmb_rwlock, flags);
+        __pmb_unmap_entry(pmbe, depth);
         write_unlock_irqrestore(&pmb_rwlock, flags);
 }
@@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
         return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-static int pmb_synchronize_mappings(void)
+static void __init pmb_notify(void)
 {
-        unsigned int applied = 0;
-        struct pmb_entry *pmbp = NULL;
-        int i, j;
+        int i;
 
         pr_info("PMB: boot mappings:\n");
 
+        read_lock(&pmb_rwlock);
+
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+                struct pmb_entry *pmbe;
+
+                if (!test_bit(i, pmb_map))
+                        continue;
+
+                pmbe = &pmb_entry_list[i];
+
+                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+        }
+
+        read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+        struct pmb_entry *pmbp = NULL;
+        int i, j;
+
         /*
          * Run through the initial boot mappings, log the established
          * ones, and blow away anything that falls outside of the valid
@@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void)
                 /*
                  * Compare the previous entry against the current one to
                  * see if the entries span a contiguous mapping. If so,
-                 * setup the entry links accordingly.
+                 * setup the entry links accordingly. Compound mappings
+                 * are later coalesced.
                  */
-                if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
-                    (pmbe->ppn == (pmbp->ppn + pmbp->size)))
+                if (pmb_can_merge(pmbp, pmbe))
                         pmbp->link = pmbe;
 
                 spin_unlock(&pmbp->lock);
@@ -444,37 +502,150 @@
                 pmbp = pmbe;
 
                 spin_unlock_irqrestore(&pmbe->lock, irqflags);
+        }
+}
 
-                pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
-                        vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
-                        (data_val & PMB_C) ? "" : "un");
+static void __init pmb_merge(struct pmb_entry *head)
+{
+        unsigned long span, newsize;
+        struct pmb_entry *tail;
+        int i = 1, depth = 0;
+
+        span = newsize = head->size;
 
-                applied++;
+        tail = head->link;
+        while (tail) {
+                span += tail->size;
+
+                if (pmb_size_valid(span)) {
+                        newsize = span;
+                        depth = i;
+                }
+
+                /* This is the end of the line.. */
+                if (!tail->link)
+                        break;
+
+                tail = tail->link;
+                i++;
         }
 
-        return (applied == 0);
+        /*
+         * The merged page size must be valid.
+         */
+        if (!pmb_size_valid(newsize))
+                return;
+
+        head->flags &= ~PMB_SZ_MASK;
+        head->flags |= pmb_size_to_flags(newsize);
+
+        head->size = newsize;
+
+        __pmb_unmap_entry(head->link, depth);
+        __set_pmb_entry(head);
 }
 
-int pmb_init(void)
+static void __init pmb_coalesce(void)
 {
-        int ret;
+        unsigned long flags;
+        int i;
+
+        write_lock_irqsave(&pmb_rwlock, flags);
+
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+                struct pmb_entry *pmbe;
+
+                if (!test_bit(i, pmb_map))
+                        continue;
+
+                pmbe = &pmb_entry_list[i];
+
+                /*
+                 * We're only interested in compound mappings
+                 */
+                if (!pmbe->link)
+                        continue;
+
+                /*
+                 * Nothing to do if it already uses the largest possible
+                 * page size.
+                 */
+                if (pmbe->size == SZ_512M)
+                        continue;
+
+                pmb_merge(pmbe);
+        }
+
+        write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+        int i;
 
         /*
-         * Sync our software copy of the PMB mappings with those in
-         * hardware. The mappings in the hardware PMB were either set up
-         * by the bootloader or very early on by the kernel.
+         * If the uncached mapping was constructed by the kernel, it will
+         * already be a reasonable size.
          */
-        ret = pmb_synchronize_mappings();
-        if (unlikely(ret == 0))
-                return 0;
+        if (uncached_size == SZ_16M)
+                return;
+
+        read_lock(&pmb_rwlock);
+
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+                struct pmb_entry *pmbe;
+                unsigned long flags;
+
+                if (!test_bit(i, pmb_map))
+                        continue;
+
+                pmbe = &pmb_entry_list[i];
+
+                if (pmbe->vpn != uncached_start)
+                        continue;
+
+                /*
+                 * Found it, now resize it.
+                 */
+                spin_lock_irqsave(&pmbe->lock, flags);
+
+                pmbe->size  = SZ_16M;
+                pmbe->flags &= ~PMB_SZ_MASK;
+                pmbe->flags |= pmb_size_to_flags(pmbe->size);
+
+                uncached_resize(pmbe->size);
+
+                __set_pmb_entry(pmbe);
+
+                spin_unlock_irqrestore(&pmbe->lock, flags);
+        }
+
+        read_lock(&pmb_rwlock);
+}
+#endif
+
+void __init pmb_init(void)
+{
+        /* Synchronize software state */
+        pmb_synchronize();
+
+        /* Attempt to combine compound mappings */
+        pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+        /* Resize initial mappings, if necessary */
+        pmb_resize();
+#endif
+
+        /* Log them */
+        pmb_notify();
 
         writel_uncached(0, PMB_IRMCR);
 
         /* Flush out the TLB */
         __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
         ctrl_barrier();
-
-        return 0;
 }
 
 bool __in_29bit_mode(void)
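
One detail worth calling out in the diff above: pmb_unmap_entry() gains a
depth argument so teardown can stop partway along a compound chain. Whole
unmaps pass NR_PMB_ENTRIES, which can never reach zero before the chain
ends, while pmb_merge() passes exactly the number of links it folded into
the head entry. A tiny standalone model of the bounded walk, with
illustrative types:

struct entry {
        struct entry *link;     /* next piece of a compound mapping */
        int freed;
};

/* Free at most 'depth' entries along the chain (cf. __pmb_unmap_entry). */
static void unmap_chain(struct entry *e, int depth)
{
        do {
                struct entry *cur = e;

                e = cur->link;
                cur->freed = 1;         /* stands in for pmb_free() */
        } while (e && --depth);
}

With depth == 2 exactly two links are freed and the remainder of the chain
is left intact, which is what lets pmb_merge() reclaim only the entries it
has folded into the head.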