author     Matt Evans <matt@ozlabs.org>                        2011-04-06 19:48:50 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2011-04-27 14:18:52 +1000
commit     44ae3ab3358e962039c36ad4ae461ae9fb29596c
tree       08c0628a5226c0535b7fe236be64b48e5eb0fbd6 /arch/powerpc/mm
parent     eca590f402332ab873d13f2d8d00fa0b91cfff36
powerpc: Free up some CPU feature bits by moving out MMU-related features
Some of the 64-bit PPC CPU features are MMU-related, so this patch moves
them to MMU_FTR_ bits. All cpu_has_feature()-style tests are moved to
mmu_has_feature(), and seven feature bits are freed as a result.
Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
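[Editor's note] The mmu_has_feature() test used throughout this patch follows the same pattern as cpu_has_feature(): a constant-mask test against a word in cur_cpu_spec, just a different word. A minimal sketch of the idea, with simplified declarations -- the real definitions live in the powerpc mmu.h/cputable.h headers, and the bit values shown here are illustrative, not the kernel's actual ones:

/*
 * Sketch of the feature-test pattern this patch converts to; simplified,
 * and the MMU_FTR_* bit values are hypothetical.
 */
#define MMU_FTR_1T_SEGMENT	0x00000001UL	/* hypothetical value */
#define MMU_FTR_SLB		0x00000002UL	/* hypothetical value */

struct cpu_spec {
	unsigned long cpu_features;	/* CPU_FTR_* bits */
	unsigned long mmu_features;	/* MMU_FTR_* bits split out by this patch */
};

extern struct cpu_spec *cur_cpu_spec;

/* Same shape as cpu_has_feature(), but tests the MMU feature word. */
static inline int mmu_has_feature(unsigned long feature)
{
	return (cur_cpu_spec->mmu_features & feature) != 0;
}

Because the set of MMU features is fixed early in boot, these tests compile down to a load and a mask; the split into a second word is what frees the seven CPU_FTR_* bits mentioned above.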
Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/hash_low_64.S    |  8 ++++----
 arch/powerpc/mm/hash_native_64.c |  8 ++++----
 arch/powerpc/mm/hash_utils_64.c  | 18 +++++++++---------
 arch/powerpc/mm/hugetlbpage.c    |  2 +-
 arch/powerpc/mm/slb.c            |  4 ++--
 arch/powerpc/mm/slb_low.S        |  8 ++++----
 arch/powerpc/mm/stab.c           |  2 +-
 7 files changed, 25 insertions, 25 deletions
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 5b7dd4ea02b5..a242b5d7cbe4 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -118,7 +118,7 @@ _GLOBAL(__hash_page_4K)
 BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -401,7 +401,7 @@ _GLOBAL(__hash_page_4K)
 BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
 	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
@@ -715,7 +715,7 @@ BEGIN_FTR_SECTION
 	andi.	r0,r31,_PAGE_NO_CACHE
 	/* If so, bail out and refault as a 4k page */
 	bne-	ht64_bail_ok
-END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY and ACCESSED)
 	 */
@@ -736,7 +736,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 784a400e0781..c23eef2b81a6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -98,8 +98,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
 
 static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
-	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
-	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
@@ -503,7 +503,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		} pte_iterate_hashed_end();
 	}
 
-	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
+	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
 	    mmu_psize_defs[psize].tlbiel && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
@@ -517,7 +517,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		}
 		asm volatile("ptesync":::"memory");
 	} else {
-		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d95d8f484d2f..26b2872b3d00 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -259,11 +259,11 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 	for (; size >= 4; size -= 4, ++prop) {
 		if (prop[0] == 40) {
 			DBG("1T segment support detected\n");
-			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
 			return 1;
 		}
 	}
-	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
+	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
 	return 0;
 }
 
@@ -289,7 +289,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	if (prop != NULL) {
 		DBG("Page sizes from device-tree:\n");
 		size /= 4;
-		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
+		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
 		while(size > 0) {
 			unsigned int shift = prop[0];
 			unsigned int slbenc = prop[1];
@@ -317,7 +317,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 				break;
 			case 0x18:
 				idx = MMU_PAGE_16M;
-				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
+				cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
 				break;
 			case 0x22:
 				idx = MMU_PAGE_16G;
@@ -412,7 +412,7 @@ static void __init htab_init_page_sizes(void)
 	 * Not in the device-tree, let's fallback on known size
 	 * list for 16M capable GP & GR
 	 */
-	if (cpu_has_feature(CPU_FTR_16M_PAGE))
+	if (mmu_has_feature(MMU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
 found:
@@ -442,7 +442,7 @@ static void __init htab_init_page_sizes(void)
 		mmu_vmalloc_psize = MMU_PAGE_64K;
 		if (mmu_linear_psize == MMU_PAGE_4K)
 			mmu_linear_psize = MMU_PAGE_64K;
-		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
 			/*
 			 * Don't use 64k pages for ioremap on pSeries, since
 			 * that would stop us accessing the HEA ethernet.
@@ -608,7 +608,7 @@ static void __init htab_initialize(void)
 	/* Initialize page sizes */
 	htab_init_page_sizes();
 
-	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		mmu_kernel_ssize = MMU_SEGSIZE_1T;
 		mmu_highuser_ssize = MMU_SEGSIZE_1T;
 		printk(KERN_INFO "Using 1TB segments\n");
@@ -749,7 +749,7 @@ void __init early_init_mmu(void)
 
 	/* Initialize stab / SLB management except on iSeries
 	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
 	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		stab_initialize(get_paca()->stab_real);
@@ -766,7 +766,7 @@ void __cpuinit early_init_mmu_secondary(void)
 	 * in real mode on pSeries and we want a virtual address on
 	 * iSeries anyway
 	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
 	else
 		stab_initialize(get_paca()->stab_addr);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9bb249c3046e..0b9a5c1901b9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -529,7 +529,7 @@ static int __init hugetlbpage_init(void)
 {
 	int psize;
 
-	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
+	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
 		return -ENODEV;
 
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5500712781d4..e22276cb67a4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -167,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 	int esid_1t_count;
 
 	/* System is not 1T segment size capable. */
-	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		return (GET_ESID(addr1) == GET_ESID(addr2));
 
 	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
@@ -202,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	hard_irq_disable();
 	offset = get_paca()->slb_cache_ptr;
-	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
 		asm volatile("isync" : : : "memory");
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 95ce35581696..ef653dc95b65 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -58,7 +58,7 @@ _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
 BEGIN_FTR_SECTION
 	b	slb_finish_load
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
 1:
@@ -87,7 +87,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 6:
 BEGIN_FTR_SECTION
 	b	slb_finish_load
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
 0:	/* user address: proto-VSID = context << 15 | ESID. First check
@@ -138,11 +138,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	rldimi	r10,r9,USER_ESID_BITS,0
 BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 446a01842a73..41e31642a86a 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -243,7 +243,7 @@ void __init stabs_alloc(void)
 {
 	int cpu;
 
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		return;
 
 	for_each_possible_cpu(cpu) {
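[Editor's note] The BEGIN_FTR_SECTION / END_MMU_FTR_SECTION_IFSET(...) pairs in the assembly files above rely on boot-time code patching, just like the CPU_FTR variants they replace: each pair records its instruction range plus a mask/value condition in a fixup table, and sections whose condition does not hold for the running CPU are overwritten with nops before the code is used. A simplified, self-contained sketch of that fixup walk -- the struct layout only loosely mirrors arch/powerpc/lib/feature-fixups.c, and patch_nop() is a hypothetical helper:

/*
 * One entry per BEGIN_FTR_SECTION/END_MMU_FTR_SECTION_IF* pair, emitted
 * into a dedicated ELF section by the assembler macros (simplified).
 */
struct fixup_entry {
	unsigned long	mask;	/* MMU_FTR_* bits to test */
	unsigned long	value;	/* expected: mask for IFSET, 0 for IFCLR */
	unsigned int	*start;	/* first guarded instruction */
	unsigned int	*end;	/* one past the last guarded instruction */
};

/* Hypothetical helper: overwrite one instruction with a nop. */
static void patch_nop(unsigned int *insn)
{
	*insn = 0x60000000;	/* powerpc "ori 0,0,0", i.e. nop */
}

/*
 * Boot-time walk: keep sections whose condition holds for this CPU's
 * mmu_features word, nop out the rest.
 */
static void do_mmu_feature_fixups(unsigned long mmu_features,
				  struct fixup_entry *fcur,
				  struct fixup_entry *fend)
{
	for (; fcur < fend; fcur++) {
		unsigned int *p;

		if ((mmu_features & fcur->mask) == fcur->value)
			continue;	/* condition matches: leave code in place */
		for (p = fcur->start; p < fcur->end; p++)
			patch_nop(p);
	}
}

Because the patching happens once at boot, the converted feature sections cost nothing at runtime; the only change in this patch is which fixup table (CPU vs. MMU) a given section is registered in.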