author    Peter Zijlstra <peterz@infradead.org>  2015-04-23 21:44:42 +0200
committer Thomas Gleixner <tglx@linutronix.de>   2015-07-27 14:06:22 +0200
commit    d835b6c4cc02507b3bf3f8ee6c86857cf0ee67ab (patch)
tree      4bee7d7516eb8f382d137cde96b46fe2b6eea442 /arch/blackfin/mach-bf561
parent    f8a570e270bf62363cd498ac2ac8ea07a76ad4d6 (diff)
download  linux-d835b6c4cc02507b3bf3f8ee6c86857cf0ee67ab.tar.bz2
blackfin: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}. These will replace
the atomic_{set,clear}_mask functions that are available on some archs.

TODO: use inline asm or at least asm macros to collapse the lot.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
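For context, the C-level picture below is a minimal sketch, not code from this patch. It assumes the generic kernel signatures atomic_or/atomic_and/atomic_xor(int, atomic_t *), and the example_* wrappers are hypothetical names used only to show how callers of the old mask helpers map onto the new ops.

#include <linux/atomic.h>

/*
 * Hypothetical wrappers, for illustration only: the new generic bitwise
 * atomics cover what the arch-specific mask helpers used to do.
 */
static inline void example_set_bits(atomic_t *v, int mask)
{
	atomic_or(mask, v);		/* roughly: atomic_set_mask(mask, v) */
}

static inline void example_clear_bits(atomic_t *v, int mask)
{
	atomic_and(~mask, v);		/* roughly: atomic_clear_mask(mask, v) */
}

static inline void example_toggle_bits(atomic_t *v, int mask)
{
	atomic_xor(mask, v);		/* no mask-helper equivalent existed */
}

Once callers are converted along these lines, the per-arch mask helpers can be dropped, which is what the changelog's "replace" refers to.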
Diffstat (limited to 'arch/blackfin/mach-bf561')
-rw-r--r--  arch/blackfin/mach-bf561/atomic.S | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
* r0 = ptr
* r1 = value
*
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
/*
* r0 = ptr
* r1 = mask
*
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
p1 = r0;
- r3 = ~r1;
+ r3 = r1;
[--sp] = rets;
call _get_core_lock;
r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
/*
* r0 = ptr
* r1 = mask
*
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
/*
* r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
r2 = r1;
r1 = 1;
r1 <<= r2;
- jump ___raw_atomic_set_asm
+ jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
/*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_bit_clear_asm)
- r2 = r1;
- r1 = 1;
- r1 <<= r2;
- jump ___raw_atomic_clear_asm
+ r2 = 1;
+ r2 <<= r1;
+ r1 = ~r2;
+ jump ___raw_atomic_and_asm
ENDPROC(___raw_bit_clear_asm)
/*
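One behavioural detail worth spelling out: ___raw_atomic_and_asm no longer complements its argument (the old ___raw_atomic_clear_asm did "r3 = ~r1"), so ___raw_bit_clear_asm now builds the inverted mask itself before jumping to it. The C-level sketch below is illustrative only, not code from the patch; the real routines do the read-modify-write between _get_core_lock and _put_core_lock, which is what provides atomicity on the dual-core BF561.

#include <stdint.h>

/* Before: the helper inverted the mask internally (r3 = ~r1). */
static void bit_clear_old(volatile uint32_t *word, unsigned int nr)
{
	uint32_t mask = 1u << nr;	/* r1 = 1; r1 <<= r2 */
	*word &= ~mask;			/* ___raw_atomic_clear_asm ANDed with ~mask */
}

/* After: the caller inverts, and the helper is a plain AND. */
static void bit_clear_new(volatile uint32_t *word, unsigned int nr)
{
	uint32_t mask = ~(1u << nr);	/* r2 = 1; r2 <<= r1; r1 = ~r2 */
	*word &= mask;			/* ___raw_atomic_and_asm ANDs as-is */
}

Either way the stored result is old & ~(1 << nr), so users of the bit-clear helper see no change in behaviour.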