| author | Vineet Gupta <vgupta@synopsys.com> | 2013-01-18 15:12:23 +0530 |
|---|---|---|
| committer | Vineet Gupta <vgupta@synopsys.com> | 2013-02-15 23:16:02 +0530 |
| commit | 41195d236e84458bebd4fdc218610a92231ac791 | |
| tree | c0049630c1a21a071c9c942086041029ebdf2866 /arch/arc/mm/tlbex.S | |
| parent | 0ef88a54aa341f754707414500158addbf35c780 | |
| download | linux-41195d236e84458bebd4fdc218610a92231ac791.tar.bz2 | |
ARC: SMP support
ARC common code to enable an SMP system + ISS-provided SMP extensions.
ARC700 natively lacks SMP support, hence some of the core features are
only enabled if SoCs have the necessary h/w pixie-dust. This includes:
- Inter-Processor Interrupts (IPI)
- Cache coherency
- load-locked/store-conditional (see the sketch after this list)
...
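As a minimal C sketch of why the load-locked/store-conditional primitive matters for SMP (this is not the kernel's arch/arc code; the function name is invented for illustration): an atomic read-modify-write is built by observing a value, attempting a conditional store, and retrying on contention. The portable C analogue is a compare-and-swap loop:

```c
#include <stdatomic.h>

/* LL/SC-style atomic add: load the value ("load-locked"), attempt to
 * publish the update ("store-conditional"), retry if another core won. */
static void atomic_add_llsc_style(atomic_int *v, int inc)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* On failure the CAS refreshes 'old' with the current value,
	 * so the loop simply recomputes old + inc and tries again. */
	while (!atomic_compare_exchange_weak(v, &old, old + inc))
		;
}
```

Without such a primitive in hardware, there is no race-free way for two cores to perform this read-modify-write concurrently.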
The low-level exception handling would be completely broken on SMP
because we don't have hardware-assisted stack switching. Thus a fair bit
of this code repurposes the MMU_SCRATCH reg in event handler prologues
to keep them re-entrant.
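A rough C model of what the TLB-miss prologue/epilogue in the diff below implements (the L1_CACHE_SHIFT and NR_CPUS values and the tlb_miss_scratch() helper are assumptions for illustration): the register save area becomes an array of cache-line-sized, cache-line-aligned per-CPU slots indexed by CPU ID, while the scratch aux register holds the one register needed to compute that slot's address.

```c
#define L1_CACHE_SHIFT	6	/* assumed 64-byte lines, for illustration */
#define NR_CPUS		4	/* stands in for CONFIG_NR_CPUS */

/* One cache line per CPU, cache-line aligned, so two cores saving
 * registers concurrently never dirty the same line (no line bouncing). */
static char ex_saved_reg1[NR_CPUS << L1_CACHE_SHIFT]
	__attribute__((aligned(1 << L1_CACHE_SHIFT)));

/* Mirrors the asm sequence: GET_CPU_ID r0; lsl r0, r0, L1_CACHE_SHIFT;
 * add r0, @ex_saved_reg1, r0 -- r1/r2/r3 then go to offsets +4/+8/+12. */
static unsigned int *tlb_miss_scratch(unsigned int cpu_id)
{
	return (unsigned int *)&ex_saved_reg1[cpu_id << L1_CACHE_SHIFT];
}
```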
Many thanks to Rajeshwar Ranga for his initial "major" contributions to
the SMP port (back in 2008), and to Noam Camus and Gilad Ben-Yossef for
help with resurrecting it in the 3.2 kernel (2012).
Note that this platform code again follows a singleton design pattern,
so multiple SMP platforms won't build at the moment; this deficiency is
addressed in subsequent patches within this series.
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rajeshwar Ranga <rajeshwar.ranga@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Diffstat (limited to 'arch/arc/mm/tlbex.S')
-rw-r--r-- | arch/arc/mm/tlbex.S | 38
1 file changed, 38 insertions, 0 deletions
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 164b02169498..4b1ad2d905ca 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -57,9 +57,15 @@
 .global ex_saved_reg1
 .align 1 << L1_CACHE_SHIFT	; IMP: Must be Cache Line aligned
 .type ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+.size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
 .size ex_saved_reg1, 16
 ex_saved_reg1:
 	.zero 16
+#endif
 
 ;============================================================================
 ;  Troubleshooting Stuff
@@ -116,7 +122,13 @@ ex_saved_reg1:
 
 	lr  r2, [efa]
 
+#ifndef CONFIG_SMP
 	lr  r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+	GET_CURR_TASK_ON_CPU  r1
+	ld  r1, [r1, TASK_ACT_MM]
+	ld  r1, [r1, MM_PGD]
+#endif
 
 	lsr     r0, r2, PGDIR_SHIFT     ; Bits for indexing into PGD
 	ld.as   r1, [r1, r0]            ; PGD entry corresp to faulting addr
@@ -192,12 +204,28 @@ ex_saved_reg1:
 ; ".size ex_saved_reg1, 16"
 ; [All of this dance is to avoid stack switching for each TLB Miss, since we
 ; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
 
 ; As simple as that....
 
 .macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
+	GET_CPU_ID  r0			; get to per cpu scratch mem,
+	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
+	add r0, @ex_saved_reg1, r0
+#else
 	st r0, [@ex_saved_reg1]
 	mov_s r0, @ex_saved_reg1
+#endif
 	st_s r1, [r0, 4]
 	st_s r2, [r0, 8]
 	st_s r3, [r0, 12]
@@ -210,11 +238,21 @@ ex_saved_reg1:
 
 ;-----------------------------------------------------------------
 .macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+	GET_CPU_ID  r0			; get to per cpu scratch mem
+	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
+	add r0, @ex_saved_reg1, r0
+	ld_s r3, [r0,12]
+	ld_s r2, [r0, 8]
+	ld_s r1, [r0, 4]
+	lr  r0, [ARC_REG_SCRATCH_DATA0]
+#else
 	mov_s r0, @ex_saved_reg1
 	ld_s r3, [r0,12]
 	ld_s r2, [r0, 8]
 	ld_s r1, [r0, 4]
 	ld_s r0, [r0]
+#endif
 .endm
 
 .section .text, "ax",@progbits	;Fast Path Code, candidate for ICCM
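One consequence visible in the `#ifndef CONFIG_SMP` hunk above: since ARC_REG_SCRATCH_DATA0 now holds the saved r0 rather than a cached PGD pointer, the SMP path must re-derive the PGD from the current task. A rough C rendering of that lookup (the struct layouts are simplified stand-ins; TASK_ACT_MM and MM_PGD are the asm-offsets of these fields):

```c
/* Simplified stand-ins for the real kernel structures. */
struct mm_struct   { unsigned long *pgd; };
struct task_struct { struct mm_struct *active_mm; };

/* Equivalent of: GET_CURR_TASK_ON_CPU r1; ld r1, [r1, TASK_ACT_MM];
 *                ld r1, [r1, MM_PGD] */
static unsigned long *current_pgd(struct task_struct *curr)
{
	return curr->active_mm->pgd;
}
```

The trade is two dependent loads on every SMP TLB miss in exchange for an SMP-safe scratch register, which is why the UP path keeps the cheaper cached-PGD scheme.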