path: root/arch/mips/mm/c-r4k.c
author    Paul Burton <paul.burton@mips.com>    2019-10-08 18:22:00 +0000
committer Paul Burton <paul.burton@mips.com>    2019-10-09 12:47:56 -0700
commit    6baaeadae911ba9cedfead881f3bf305a18fd011 (patch)
tree      c840078b52ea16af8e3bf7ddb46831cdc49dad33 /arch/mips/mm/c-r4k.c
parent    a14bf1dc494aa5126e4f23ebd9fa04991133814e (diff)
MIPS: Provide unroll() macro, use it for cache ops
Currently we have a lot of duplication in asm/r4kcache.h to handle manually unrolling loops of cache ops for various line sizes, and we have to explicitly handle the difference in cache op immediate width between MIPSr6 & earlier ISA revisions with further duplication.

Introduce an unroll() macro in asm/unroll.h which expands to a switch statement which is used to call a function or expand a preprocessor macro a compile-time constant number of times in a row - effectively explicitly unrolling a loop. We make use of this here to remove the cache op duplication & will use it further in later patches.

A nice side effect of this is that calculating the cache op offset immediate is now the compiler's responsibility, so we're no longer sensitive to the width change of that immediate in MIPSr6 & will be similarly agnostic to immediate width in any future supported ISA.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
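The header itself is not part of this diff, so as a rough, self-contained illustration of the technique the message describes - a switch whose cases fall through so that a macro is expanded a compile-time constant number of times - here is a plain C sketch. The names unroll_4() and touch_line(), the 4-case limit and the 32-byte stride are assumptions for the example, not the identifiers or limits used in asm/unroll.h.

/*
 * Illustrative sketch only (not the kernel's asm/unroll.h): a macro that
 * expands to a switch statement whose cases fall through, so that fn is
 * expanded `times' times in a row when `times' is a compile-time constant.
 */
#include <stdio.h>

#define unroll_4(times, fn, ...)				\
	do {							\
		switch (times) {				\
		case 4: fn(__VA_ARGS__); /* fall through */	\
		case 3: fn(__VA_ARGS__); /* fall through */	\
		case 2: fn(__VA_ARGS__); /* fall through */	\
		case 1: fn(__VA_ARGS__);			\
		}						\
	} while (0)

/*
 * Stand-in for a cache op: each expansion re-evaluates idx++, so the
 * compiler computes the next 32-byte line offset itself, mirroring the
 * "offset is the compiler's responsibility" point in the commit message.
 */
#define touch_line(base, idx) \
	printf("cache op at %#lx\n", (base) + (idx)++ * 32UL)

int main(void)
{
	unsigned long addr = 0x1000;
	int i = 0;

	unroll_4(4, touch_line, addr, i);	/* touches 0x1000..0x1060 */
	return 0;
}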
Diffstat (limited to 'arch/mips/mm/c-r4k.c')
-rw-r--r--  arch/mips/mm/c-r4k.c  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4bf990633135..378cbb02dcdd 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -271,12 +271,14 @@ static inline void tx49_blast_icache32(void)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
@@ -302,12 +304,14 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static void (* r4k_blast_icache_page)(unsigned long addr);
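In both hunks the hand-written cache32_unroll32(addr|ws, Index_Invalidate_I) expansion is replaced by a single cache_unroll(32, kernel_cache, Index_Invalidate_I, addr | ws, 32) call. Reading the call against the commit message, the two 32s are plausibly the unroll count and the cache line size in bytes, kernel_cache presumably selects the kernel-mode cache instruction variant, and Index_Invalidate_I remains the operation; the per-line offset arithmetic that cache32_unroll32 spelled out by hand is now left to the compiler, which is what makes the code insensitive to the MIPSr6 immediate-width change.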