author     Nick Desaulniers <ndesaulniers@google.com>   2022-10-18 10:21:55 -0700
committer  Dave Hansen <dave.hansen@linux.intel.com>    2022-11-01 15:44:07 -0700
commit     bce5a1e8a34006a5e80213ede5e5c465d53f1dce (patch)
tree       de5ce0c4e9c1634048a029768c64dfb1de8e88ee /lib
parent     30a0b95b1335e12efef89dd78518ed3e4a71a763 (diff)
download   linux-bce5a1e8a34006a5e80213ede5e5c465d53f1dce.tar.bz2
x86/mem: Move memmove to out of line assembler
When building ARCH=i386 with CONFIG_LTO_CLANG_FULL=y, it's possible
(depending on additional configs which I have not been able to isolate)
to observe a failure during register allocation:

  error: inline assembly requires more registers than available

when memmove is inlined into tcp_v4_fill_cb() or tcp_v6_fill_cb().

memmove is quite large and probably shouldn't be inlined due to size
alone. A noinline function attribute would be the simplest fix, but
there are a few things that stand out with the current definition:

In addition to having complex constraints that can't always be
resolved, the clobber list seems to be missing %bx. By using numbered
operands rather than symbolic operands, the constraints are quite
obnoxious to refactor.

Having a large function be 99% inline asm is a code smell that this
function should simply be written in stand-alone out-of-line
assembler. Moving this to out-of-line assembler guarantees that the
compiler cannot inline calls to memmove. This has been done previously
for 64b:

  commit 9599ec0471de ("x86-64, mem: Convert memmove() to assembly file
  and fix return value bug")

That gives the opportunity for other cleanups like fixing the
inconsistent use of tabs vs spaces and instruction suffixes, and the
label 3 appearing twice. Symbolic operands, local labels, and
additional comments would provide this code with a fresh coat of
paint.

Finally, add a test that tickles the `rep movsl` implementation to
test it for correctness, since it has implicit operands.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Suggested-by: David Laight <David.Laight@aculab.com>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/all/20221018172155.287409-1-ndesaulniers%40google.com
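The numbered-vs-symbolic distinction the message draws is worth a
concrete look: numbered operands (%0, %1, ...) refer to positions in
the constraint lists, so inserting, removing, or reordering a
constraint silently renumbers every reference in the template, while
symbolic operands ([name]) survive such refactors. A minimal sketch in
GNU C extended asm for x86; add_numbered and add_symbolic are
illustrative helpers, not code from this patch:

#include <stdio.h>

/*
 * Numbered operands: %0, %1, %2 are positions in the operand lists,
 * so changing the constraints means renumbering the whole template.
 */
static int add_numbered(int a, int b)
{
	int out;
	asm("addl %2, %1\n\t"	/* a += b */
	    "movl %1, %0"	/* out = a */
	    : "=r"(out), "+r"(a)
	    : "r"(b));
	return out;
}

/*
 * Symbolic operands: each reference names its operand, so the
 * constraint lists can be refactored without touching the template.
 */
static int add_symbolic(int a, int b)
{
	int out;
	asm("addl %[addend], %[accum]\n\t"
	    "movl %[accum], %[result]"
	    : [result] "=r"(out), [accum] "+r"(a)
	    : [addend] "r"(b));
	return out;
}

int main(void)
{
	printf("%d %d\n", add_numbered(2, 3), add_symbolic(2, 3)); /* 5 5 */
	return 0;
}

Both helpers compile to the same instructions; only maintainability
differs, which is the point the commit message is making.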
Diffstat (limited to 'lib')
-rw-r--r--  lib/memcpy_kunit.c  22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 2b5cc70ac53f..7513e6d5dc90 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -105,6 +105,8 @@ static void memcpy_test(struct kunit *test)
 #undef TEST_OP
 }
 
+static unsigned char larger_array[2048];
+
 static void memmove_test(struct kunit *test)
 {
 #define TEST_OP "memmove"
@@ -179,6 +181,26 @@ static void memmove_test(struct kunit *test)
 	ptr = &overlap.data[2];
 	memmove(ptr, overlap.data, 5);
 	compare("overlapping write", overlap, overlap_expected);
+
+	/* Verify larger overlapping moves. */
+	larger_array[256] = 0xAAu;
+	/*
+	 * Test a backwards overlapping memmove first. 256 and 1024 are
+	 * important for i386 to use rep movsl.
+	 */
+	memmove(larger_array, larger_array + 256, 1024);
+	KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu);
+	KUNIT_ASSERT_EQ(test, larger_array[256], 0x00);
+	KUNIT_ASSERT_NULL(test,
+		memchr(larger_array + 1, 0xAAu, ARRAY_SIZE(larger_array) - 1));
+	/* Test a forwards overlapping memmove. */
+	larger_array[0] = 0xBBu;
+	memmove(larger_array + 256, larger_array, 1024);
+	KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu);
+	KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu);
+	KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1));
+	KUNIT_ASSERT_NULL(test,
+		memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257));
 #undef TEST_OP
 }
 
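The reason `rep movsl` merits a targeted test is that its operands are
implicit: it copies %ecx doublewords from (%esi) to (%edi) with nothing
spelled out in the instruction text, so a mistake in staging those
registers only surfaces when data actually moves. The 256-byte offset
and 1024-byte length above are what steer the i386 memmove onto that
path in both directions. A freestanding sketch of the idiom in GNU C
extended asm; copy_dwords is a hypothetical helper for illustration,
not the kernel's implementation:

#include <stdio.h>
#include <string.h>

/*
 * rep movsl's operands are implicit: the count lives in %ecx, the
 * source in %esi, the destination in %edi. The constraints below pin
 * the C values to those registers, and the "memory" clobber tells the
 * compiler that bytes were moved behind its back.
 */
static void copy_dwords(void *dst, const void *src, unsigned long ndwords)
{
	asm volatile("rep movsl"
		     : "+D"(dst), "+S"(src), "+c"(ndwords)
		     : /* no explicit inputs */
		     : "memory");
}

int main(void)
{
	static unsigned char from[1024], to[1024];

	memset(from, 0xAA, sizeof(from));
	copy_dwords(to, from, sizeof(from) / 4);
	printf("to[0] = 0x%02X, to[1023] = 0x%02X\n", to[0], to[1023]);
	return 0;
}

All three constraints are read-write ("+") because the instruction
both consumes and advances each register, which is exactly the kind of
bookkeeping the out-of-line assembly version has to get right.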