author    | Olof Johansson <olof@lixom.net>   | 2007-11-20 12:24:45 +1100
committer | Paul Mackerras <paulus@samba.org> | 2007-11-20 13:56:31 +1100
commit    | fbe481756df57673b6acbcd2e139d0d2658f2188
tree      | f1981a79330e1eeaa2294516587bf338f4a6476e /arch/powerpc/kernel/vdso64
parent    | 92e21e79a85924ddda00f4678d60bbd8f891a553
download  | linux-fbe481756df57673b6acbcd2e139d0d2658f2188.tar.bz2
[POWERPC] vdso: Fixes for cache block sizes
The current VDSO implementation is hardcoded to 128-byte cache blocks,
which are only used on IBM's 64-bit processors.
Convert it to get the cache block sizes out of vdso_data instead,
similar to how the ppc64 in-kernel cache flush does it.
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
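
For reference, the CFG_DCACHE_BLOCKSZ, CFG_DCACHE_LOGBLOCKSZ, CFG_ICACHE_BLOCKSZ
and CFG_ICACHE_LOGBLOCKSZ offsets used in the diff below index into the vdso_data
page returned by __get_datapage. A minimal sketch of the kind of kernel-side setup
this relies on follows; the vdso_data field names and the ppc64_caches members are
assumptions for illustration and are not part of this diff:

	/* asm-offsets.c (sketch): export vdso_data field offsets to assembly */
	DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
	DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size));
	DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
	DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));

	/* vdso setup (sketch): publish the detected cache geometry to the datapage */
	vdso_data->dcache_block_size     = ppc64_caches.dline_size;
	vdso_data->dcache_log_block_size = ppc64_caches.log_dline_size;
	vdso_data->icache_block_size     = ppc64_caches.iline_size;
	vdso_data->icache_log_block_size = ppc64_caches.log_iline_size;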
Diffstat (limited to 'arch/powerpc/kernel/vdso64')
-rw-r--r-- | arch/powerpc/kernel/vdso64/cacheflush.S | 41
1 file changed, 29 insertions, 12 deletions
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
index 66a36d3cc6ad..69c5af2b3c96 100644
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -23,29 +23,46 @@
  *
  * Flushes the data cache & invalidate the instruction cache for the
  * provided range [start, end[
- *
- * Note: all CPUs supported by this kernel have a 128 bytes cache
- * line size so we don't have to peek that info from the datapage
  */
 V_FUNCTION_BEGIN(__kernel_sync_dicache)
   .cfi_startproc
-	li	r5,127
-	andc	r6,r3,r5		/* round low to line bdy */
+	mflr	r12
+  .cfi_register lr,r12
+	mr	r11,r3
+	bl	V_LOCAL_FUNC(__get_datapage)
+	mtlr	r12
+	mr	r10,r3
+
+	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r11,r5		/* round low to line bdy */
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
-	srwi.	r8,r8,7			/* compute line count */
+	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
+	srw.	r8,r8,r9		/* compute line count */
 	crclr	cr0*4+so
 	beqlr				/* nothing to do? */
 	mtctr	r8
-	mr	r3,r6
-1:	dcbst	0,r3
-	addi	r3,r3,128
+1:	dcbst	0,r6
+	add	r6,r6,r7
 	bdnz	1b
 	sync
+
+/* Now invalidate the instruction cache */
+
+	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r11,r5		/* round low to line bdy */
+	subf	r8,r6,r4		/* compute length */
+	add	r8,r8,r5
+	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
+	srw.	r8,r8,r9		/* compute line count */
+	crclr	cr0*4+so
+	beqlr				/* nothing to do? */
 	mtctr	r8
-1:	icbi	0,r6
-	addi	r6,r6,128
-	bdnz	1b
+2:	icbi	0,r6
+	add	r6,r6,r7
+	bdnz	2b
 	isync
 	li	r3,0
 	blr
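
Both loop prologues above perform the same line-count computation, once with the
D-cache geometry and once with the I-cache geometry. As a hypothetical C model of
that arithmetic (the function name and types are illustrative only, not kernel code):

	/*
	 * Model of the line-count math in __kernel_sync_dicache: round "start"
	 * down to a cache-block boundary, then count how many blocks of size
	 * "block_size" (a power of two, log2 == log_block_size) cover [start, end[.
	 */
	static unsigned long cache_line_count(unsigned long start, unsigned long end,
					      unsigned long block_size,
					      unsigned int log_block_size)
	{
		unsigned long mask = block_size - 1;	/* addi r5,r7,-1  */
		unsigned long low  = start & ~mask;	/* andc r6,r11,r5 */
		unsigned long len  = end - low;		/* subf r8,r6,r4  */

		len += mask;				/* add  r8,r8,r5  */
		return len >> log_block_size;		/* srw. r8,r8,r9  */
	}

With the block size and its log2 read from vdso_data, the same loop handles any
power-of-two block size, rather than only the 128 bytes hardcoded by the
li r5,127 / srwi. r8,r8,7 / addi ...,128 sequence it replaces.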