| | | |
|---|---|---|
| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2012-09-10 02:52:53 +0000 |
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-09-17 16:31:50 +1000 |
| commit | ac8dc2823a30a2d166fed6e919ab2e576f8fca84 (patch) | |
| tree | 5d8a89cf1254c119f1005a4ffda594a1a1df7612 /arch/powerpc/mm | |
| parent | 7aa0727f3302931e698b3a7979ae5b9a4600da4e (diff) | |
| download | linux-ac8dc2823a30a2d166fed6e919ab2e576f8fca84.tar.bz2 | |
powerpc/mm: Use the required number of VSID bits in slbmte
ASM_VSID_SCRAMBLE can leave non-zero bits in the high 28 bits of the result
for a 256MB segment (40 bits for a 1T segment). Properly mask them before
using the values in slbmte.
Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/slb_low.S | 12 |
1 file changed, 10 insertions, 2 deletions
```diff
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index e132dc6ed1a9..3b75f19aaa22 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -223,7 +223,11 @@ _GLOBAL(slb_allocate_user)
 	 */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
-	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_256M need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
 
 	/* r3 = EA, r11 = VSID data */
 	/*
@@ -287,7 +291,11 @@ _GLOBAL(slb_compare_rr_to_size)
 slb_finish_load_1T:
 	srdi	r10,r10,40-28		/* get 1T ESID */
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
-	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_1T need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
 	li	r10,MMU_SEGSIZE_1T
 	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
 
```
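As a rough illustration of what the new `rldimi` operands achieve, the sketch below models the insert in C. It is not kernel code: the function name, parameter names, and the explicit mask are made up for illustration, and the kernel does all of this in a single `rldimi` instruction. The point is that an insert boundary of `64 - (shift + vsid_bits)` means exactly `vsid_bits` low-order bits of the scrambled VSID land above the flag bits, so anything ASM_VSID_SCRAMBLE leaves above `VSID_BITS_256M` (or `VSID_BITS_1T`) never reaches the value handed to slbmte.

```c
#include <stdint.h>

/*
 * Illustrative model of "rldimi rA,rS,SH,MB" with MB = 64 - (SH + N):
 * the instruction rotates rS left by SH and inserts it into rA under a
 * mask covering exactly the N bits starting at bit position SH.  A plain
 * shift is equivalent here because SH + N <= 64.  Bits of rA outside the
 * field (the SLB flag bits below SH) are preserved, and bits of rS at
 * position N or above (scramble residue) are discarded.
 */
static uint64_t combine_vsid_and_flags(uint64_t vsid_data,      /* r11: flags */
				       uint64_t scrambled_vsid, /* r10: scrambled VSID */
				       unsigned int shift,      /* SLB_VSID_SHIFT or SLB_VSID_SHIFT_1T */
				       unsigned int vsid_bits)  /* VSID_BITS_256M or VSID_BITS_1T */
{
	uint64_t field = ((1ULL << vsid_bits) - 1) << shift;

	return (vsid_data & ~field) | ((scrambled_vsid << shift) & field);
}
```

The two hunks above are the same operation for the two segment sizes: the 256MB path uses SLB_VSID_SHIFT with VSID_BITS_256M and the 1T path uses SLB_VSID_SHIFT_1T with VSID_BITS_1T. The old hard-coded insert boundary of 16 did not track VSID_BITS at all, which is what this patch corrects.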