author | Max Filippov <jcmvbkbc@gmail.com> | 2021-07-25 16:31:34 -0700
committer | Max Filippov <jcmvbkbc@gmail.com> | 2021-10-18 22:19:34 -0700
commit | d191323bc02370667fede74228135440efd98d2b (patch)
tree | f1fa31aaeb0dea91d6f2a9e65c8c39a3a367f1fd /arch/xtensa
parent | eda8dd1224d6c1c89eb6b687264da9ccfbffb0fd (diff)
download | linux-d191323bc02370667fede74228135440efd98d2b.tar.bz2
xtensa: don't use a12 in strncpy_user
a12 is a callee-saved register in the xtensa call0 ABI, so a function must not
change it. a10 is not used in this function at all; use it instead of
a12 to avoid having to save and restore it.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
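For context, a minimal sketch of what the call0 ABI implies here (the function
names below are hypothetical, not from the kernel source): a12-a15 are
callee-saved, so a routine that writes one of them must spill and reload it
around its body, while scratch registers such as a10 may be clobbered freely.

# Illustrative sketch only, assuming the standard call0 register conventions
# (a0 return address, a1 stack pointer, a8-a11 scratch, a12-a15 callee-saved).

	.text
	.align	4
	.global	uses_a12		# hypothetical leaf function
uses_a12:
	addi	a1, a1, -16		# open a small stack frame
	s32i	a12, a1, 0		# a12 is callee-saved: spill it first
	movi	a12, 42			# ... use a12 as a temporary ...
	l32i	a12, a1, 0		# restore a12 for the caller
	addi	a1, a1, 16		# tear down the frame
	ret

	.global	uses_a10		# hypothetical: same work in a scratch reg
uses_a10:
	movi	a10, 42			# a10 may be clobbered, no spill/restore
	ret

The patch achieves the same effect without any spill at all by moving the
temporary from a12 to the otherwise unused a10.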
Diffstat (limited to 'arch/xtensa')
-rw-r--r-- | arch/xtensa/lib/strncpy_user.S | 17
1 file changed, 8 insertions, 9 deletions
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 4faf46fe3f38..0731912227d3 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -45,7 +45,6 @@
 # a9/ tmp
 # a10/ tmp
 # a11/ dst
-# a12/ tmp
 
 .text
 ENTRY(__strncpy_user)
@@ -61,7 +60,7 @@ ENTRY(__strncpy_user)
 	bbsi.l	a3, 0, .Lsrc1mod2	# if only  8-bit aligned
 	bbsi.l	a3, 1, .Lsrc2mod4	# if only 16-bit aligned
 .Lsrcaligned:	# return here when src is word-aligned
-	srli	a12, a4, 2	# number of loop iterations with 4B per loop
+	srli	a10, a4, 2	# number of loop iterations with 4B per loop
 	movi	a9, 3
 	bnone	a11, a9, .Laligned
 	j	.Ldstunaligned
@@ -102,11 +101,11 @@ EX(10f)	s8i	a9, a11, 0		# store byte 0
 	.byte	0		# (0 mod 4 alignment for LBEG)
 .Laligned:
 #if XCHAL_HAVE_LOOPS
-	loopnez	a12, .Loop1done
+	loopnez	a10, .Loop1done
 #else
-	beqz	a12, .Loop1done
-	slli	a12, a12, 2
-	add	a12, a12, a11	# a12 = end of last 4B chunck
+	beqz	a10, .Loop1done
+	slli	a10, a10, 2
+	add	a10, a10, a11	# a10 = end of last 4B chunck
 #endif
 .Loop1:
 EX(11f)	l32i	a9, a3, 0	# get word from src
@@ -118,7 +117,7 @@ EX(10f)	s32i	a9, a11, 0	# store word to dst
 	bnone	a9, a8, .Lz3	# if byte 3 is zero
 	addi	a11, a11, 4	# advance dst pointer
 #if !XCHAL_HAVE_LOOPS
-	blt	a11, a12, .Loop1
+	blt	a11, a10, .Loop1
 #endif
 
 .Loop1done:
@@ -185,7 +184,7 @@ EX(10f)	s8i	a9, a11, 2
 	loopnez	a4, .Lunalignedend
 #else
 	beqz	a4, .Lunalignedend
-	add	a12, a11, a4	# a12 = ending address
+	add	a10, a11, a4	# a10 = ending address
 #endif /* XCHAL_HAVE_LOOPS */
 .Lnextbyte:
 EX(11f)	l8ui	a9, a3, 0
@@ -194,7 +193,7 @@ EX(10f)	s8i	a9, a11, 0
 	beqz	a9, .Lunalignedend
 	addi	a11, a11, 1
 #if !XCHAL_HAVE_LOOPS
-	blt	a11, a12, .Lnextbyte
+	blt	a11, a10, .Lnextbyte
 #endif
 
 .Lunalignedend:
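As a side note on the pattern the changed lines live in, here is a minimal
sketch (not part of the patch; labels and registers are illustrative, and the
user-access EX() fixups are omitted) of how the same register doubles as an
iteration count for the zero-overhead hardware loop and as an end pointer for
the software fallback:

#if XCHAL_HAVE_LOOPS
	loopnez	a10, .Ldone		# hardware loop: a10 = iteration count
#else
	beqz	a10, .Ldone		# nothing to copy
	slli	a10, a10, 2		# convert word count to byte length
	add	a10, a10, a11		# a10 = end address (dst + length)
#endif
.Lcopy:
	l32i	a9, a3, 0		# load a word from src
	s32i	a9, a11, 0		# store it to dst
	addi	a3, a3, 4		# advance src pointer
	addi	a11, a11, 4		# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a10, .Lcopy	# software loop: compare against end address
#endif
.Ldone: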