author     Vineet Gupta <vgupta@synopsys.com>  2014-02-07 13:47:43 +0530
committer  Vineet Gupta <vgupta@synopsys.com>  2014-03-26 14:31:28 +0530
commit     ec7ac6afd07b2d958aab9dfc0a686300b856922a (patch)
tree       a4705a35138437a44c8dc99939e36cfda4d34473 /arch/arc/lib
parent     d6579e99bc448a351279cea7469ab64a00d25ff8 (diff)
ARC: switch to generic ENTRY/END assembler annotations
With commit 9df62f054406 "arch: use ASM_NL instead of ';'" the generic macros can handle the arch specific newline quirk. Hence we can get rid of ARC asm macros and use the "C" style macros.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
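A brief note on why this works, as a minimal sketch paraphrasing the generic macros in include/linux/linkage.h (simplified; alignment directives omitted, and the exact kernel definitions may differ). The ARC separator value shown at the end is an assumption, based on the ARC assembler treating ';' as a comment character:

/* include/linux/linkage.h (sketch): every statement inside the macro body is
 * terminated with ASM_NL instead of a hard-coded ';', so an architecture can
 * override the separator without redefining the macros themselves. */
#ifndef ASM_NL
#define ASM_NL		;
#endif

#ifndef ENTRY
#define ENTRY(name) \
	.globl name ASM_NL \
	name:
#endif

#ifndef END
#define END(name) \
	.size name, .-name
#endif

/* arch/arc/include/asm/linkage.h (assumed override): ';' starts a comment in
 * ARC assembly, so ARC supplies its own statement separator. With that
 * override the generic ENTRY/END expand correctly, and the ARC_ENTRY/ARC_EXIT
 * wrappers become redundant. */
#define ASM_NL		`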
Diffstat (limited to 'arch/arc/lib')
-rw-r--r--  arch/arc/lib/memcmp.S      6
-rw-r--r--  arch/arc/lib/memcpy-700.S  6
-rw-r--r--  arch/arc/lib/memset.S      10
-rw-r--r--  arch/arc/lib/strchr-700.S  6
-rw-r--r--  arch/arc/lib/strcmp.S      6
-rw-r--r--  arch/arc/lib/strcpy-700.S  6
-rw-r--r--  arch/arc/lib/strlen.S      6
7 files changed, 23 insertions, 23 deletions
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
index bc813d55b6c3..978bf8314dfb 100644
--- a/arch/arc/lib/memcmp.S
+++ b/arch/arc/lib/memcmp.S
@@ -6,7 +6,7 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
#ifdef __LITTLE_ENDIAN__
#define WORD2 r2
@@ -16,7 +16,7 @@
#define SHIFT r2
#endif
-ARC_ENTRY memcmp
+ENTRY(memcmp)
or r12,r0,r1
asl_s r12,r12,30
sub r3,r2,1
@@ -121,4 +121,4 @@ ARC_ENTRY memcmp
.Lnil:
j_s.d [blink]
mov r0,0
-ARC_EXIT memcmp
+END(memcmp)
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
index b64cc10ac918..3222573e50de 100644
--- a/arch/arc/lib/memcpy-700.S
+++ b/arch/arc/lib/memcpy-700.S
@@ -6,9 +6,9 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY memcpy
+ENTRY(memcpy)
or r3,r0,r1
asl_s r3,r3,30
mov_s r5,r0
@@ -63,4 +63,4 @@ ARC_ENTRY memcpy
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0]
-ARC_EXIT memcpy
+END(memcpy)
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
index 9b2d88d2e141..d36bd43fc98d 100644
--- a/arch/arc/lib/memset.S
+++ b/arch/arc/lib/memset.S
@@ -6,11 +6,11 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
-ARC_ENTRY memset
+ENTRY(memset)
mov_s r4,r0
or r12,r0,r2
bmsk.f r12,r12,1
@@ -46,14 +46,14 @@ ARC_ENTRY memset
stb.ab r1,[r4,1]
.Ltiny_end:
j_s [blink]
-ARC_EXIT memset
+END(memset)
; memzero: @r0 = mem, @r1 = size_t
; memset: @r0 = mem, @r1 = char, @r2 = size_t
-ARC_ENTRY memzero
+ENTRY(memzero)
; adjust bzero args to memset args
mov r2, r1
mov r1, 0
b memset ;tail call so need to tinker with blink
-ARC_EXIT memzero
+END(memzero)
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 9c548c7cf001..b725d5862107 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -11,9 +11,9 @@
presence of the norm instruction makes it easier to operate on whole
words branch-free. */
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strchr
+ENTRY(strchr)
extb_s r1,r1
asl r5,r1,8
bmsk r2,r0,1
@@ -130,4 +130,4 @@ ARC_ENTRY strchr
j_s.d [blink]
mov.mi r0,0
#endif /* ENDIAN */
-ARC_EXIT strchr
+END(strchr)
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
index 5dc802b45cf3..3544600fefe6 100644
--- a/arch/arc/lib/strcmp.S
+++ b/arch/arc/lib/strcmp.S
@@ -13,9 +13,9 @@
source 1; however, that would increase the overhead for loop setup / finish,
and strcmp might often terminate early. */
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strcmp
+ENTRY(strcmp)
or r2,r0,r1
bmsk_s r2,r2,1
brne r2,0,.Lcharloop
@@ -93,4 +93,4 @@ ARC_ENTRY strcmp
.Lcmpend:
j_s.d [blink]
sub r0,r2,r3
-ARC_EXIT strcmp
+END(strcmp)
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
index b7ca4ae81d88..8422f38e1218 100644
--- a/arch/arc/lib/strcpy-700.S
+++ b/arch/arc/lib/strcpy-700.S
@@ -16,9 +16,9 @@
there, but it is not likely to be taken often, and it
would also be likely to cost an unaligned mispredict at the next call. */
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strcpy
+ENTRY(strcpy)
or r2,r0,r1
bmsk_s r2,r2,1
brne.d r2,0,charloop
@@ -67,4 +67,4 @@ charloop:
brne.d r3,0,charloop
stb.ab r3,[r10,1]
j [blink]
-ARC_EXIT strcpy
+END(strcpy)
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
index 39759e099696..53cfd5685a5f 100644
--- a/arch/arc/lib/strlen.S
+++ b/arch/arc/lib/strlen.S
@@ -6,9 +6,9 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strlen
+ENTRY(strlen)
or r3,r0,7
ld r2,[r3,-7]
ld.a r6,[r3,-3]
@@ -80,4 +80,4 @@ ARC_ENTRY strlen
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1
-ARC_EXIT strlen
+END(strlen)