Diffstat (limited to 'tools')
-rw-r--r--  tools/iio/iio_event_monitor.c                         2
-rw-r--r--  tools/testing/kunit/configs/broken_on_uml.config      1
-rwxr-xr-x  tools/testing/kunit/kunit.py                         58
-rw-r--r--  tools/testing/kunit/kunit_json.py                    63
-rw-r--r--  tools/testing/kunit/kunit_kernel.py                  27
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py               33
-rw-r--r--  tools/testing/selftests/Makefile                     34
-rw-r--r--  tools/testing/selftests/firmware/.gitignore           2
-rwxr-xr-x  tools/testing/selftests/firmware/fw_filesystem.sh    91
-rw-r--r--  tools/testing/selftests/lib.mk                        9
-rw-r--r--  tools/testing/selftests/livepatch/functions.sh        2
-rwxr-xr-x  tools/testing/selftests/lkdtm/run.sh                  2
-rw-r--r--  tools/testing/selftests/netfilter/.gitignore          2
-rw-r--r--  tools/testing/selftests/ptrace/.gitignore             1
-rwxr-xr-x  tools/testing/selftests/run_kselftest.sh             93
-rw-r--r--  tools/testing/selftests/vm/config                     1
16 files changed, 363 insertions, 58 deletions
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index f115d166c985..bb03859db89d 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -119,6 +119,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PM2P5] = "pm2p5",
[IIO_MOD_PM4] = "pm4",
[IIO_MOD_PM10] = "pm10",
+ [IIO_MOD_O2] = "o2",
};
static bool event_is_known(struct iio_event_data *event)
@@ -211,6 +212,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_PM2P5:
case IIO_MOD_PM4:
case IIO_MOD_PM10:
+ case IIO_MOD_O2:
break;
default:
return false;
diff --git a/tools/testing/kunit/configs/broken_on_uml.config b/tools/testing/kunit/configs/broken_on_uml.config
index 239b9f03da2c..a7f0603d33f6 100644
--- a/tools/testing/kunit/configs/broken_on_uml.config
+++ b/tools/testing/kunit/configs/broken_on_uml.config
@@ -39,3 +39,4 @@
# CONFIG_QCOM_CPR is not set
# CONFIG_RESET_BRCMSTB_RESCAL is not set
# CONFIG_RESET_INTEL_GW is not set
+# CONFIG_ADI_AXI_ADC is not set
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 425ef40067e7..ebf5f5763dee 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -17,6 +17,7 @@ from collections import namedtuple
from enum import Enum, auto
import kunit_config
+import kunit_json
import kunit_kernel
import kunit_parser
@@ -30,9 +31,9 @@ KunitBuildRequest = namedtuple('KunitBuildRequest',
KunitExecRequest = namedtuple('KunitExecRequest',
['timeout', 'build_dir', 'alltests'])
KunitParseRequest = namedtuple('KunitParseRequest',
- ['raw_output', 'input_data'])
+ ['raw_output', 'input_data', 'build_dir', 'json'])
KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
- 'build_dir', 'alltests',
+ 'build_dir', 'alltests', 'json',
'make_options'])
KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
@@ -113,12 +114,22 @@ def parse_tests(request: KunitParseRequest) -> KunitResult:
test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
[],
'Tests not Parsed.')
+
if request.raw_output:
kunit_parser.raw_output(request.input_data)
else:
test_result = kunit_parser.parse_run_tests(request.input_data)
parse_end = time.time()
+ if request.json:
+ json_obj = kunit_json.get_json_result(
+ test_result=test_result,
+ def_config='kunit_defconfig',
+ build_dir=request.build_dir,
+ json_path=request.json)
+ if request.json == 'stdout':
+ print(json_obj)
+
if test_result.status != kunit_parser.TestStatus.SUCCESS:
return KunitResult(KunitStatus.TEST_FAILURE, test_result,
parse_end - parse_start)
@@ -151,7 +162,9 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
return exec_result
parse_request = KunitParseRequest(request.raw_output,
- exec_result.result)
+ exec_result.result,
+ request.build_dir,
+ request.json)
parse_result = parse_tests(parse_request)
run_end = time.time()
@@ -195,7 +208,12 @@ def add_exec_opts(parser):
def add_parse_opts(parser):
parser.add_argument('--raw_output', help='don\'t format output from kernel',
action='store_true')
-
+ parser.add_argument('--json',
+ nargs='?',
+ help='Stores test results in a JSON, and either '
+ 'prints to stdout or saves to file if a '
+ 'filename is specified',
+ type=str, const='stdout', default=None)
def main(argv, linux=None):
parser = argparse.ArgumentParser(
@@ -237,10 +255,16 @@ def main(argv, linux=None):
cli_args = parser.parse_args(argv)
+ if get_kernel_root_path():
+ os.chdir(get_kernel_root_path())
+
if cli_args.subcommand == 'run':
if not os.path.exists(cli_args.build_dir):
os.mkdir(cli_args.build_dir)
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
+ create_default_kunitconfig()
+
if not linux:
linux = kunit_kernel.LinuxSourceTree()
@@ -249,14 +273,18 @@ def main(argv, linux=None):
cli_args.jobs,
cli_args.build_dir,
cli_args.alltests,
+ cli_args.json,
cli_args.make_options)
result = run_tests(linux, request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'config':
- if cli_args.build_dir:
- if not os.path.exists(cli_args.build_dir):
- os.mkdir(cli_args.build_dir)
+ if cli_args.build_dir and (
+ not os.path.exists(cli_args.build_dir)):
+ os.mkdir(cli_args.build_dir)
+
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
+ create_default_kunitconfig()
if not linux:
linux = kunit_kernel.LinuxSourceTree()
@@ -270,10 +298,6 @@ def main(argv, linux=None):
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'build':
- if cli_args.build_dir:
- if not os.path.exists(cli_args.build_dir):
- os.mkdir(cli_args.build_dir)
-
if not linux:
linux = kunit_kernel.LinuxSourceTree()
@@ -288,10 +312,6 @@ def main(argv, linux=None):
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'exec':
- if cli_args.build_dir:
- if not os.path.exists(cli_args.build_dir):
- os.mkdir(cli_args.build_dir)
-
if not linux:
linux = kunit_kernel.LinuxSourceTree()
@@ -300,7 +320,9 @@ def main(argv, linux=None):
cli_args.alltests)
exec_result = exec_tests(linux, exec_request)
parse_request = KunitParseRequest(cli_args.raw_output,
- exec_result.result)
+ exec_result.result,
+ cli_args.build_dir,
+ cli_args.json)
result = parse_tests(parse_request)
kunit_parser.print_with_timestamp((
'Elapsed time: %.3fs\n') % (
@@ -314,7 +336,9 @@ def main(argv, linux=None):
with open(cli_args.file, 'r') as f:
kunit_output = f.read().splitlines()
request = KunitParseRequest(cli_args.raw_output,
- kunit_output)
+ kunit_output,
+ cli_args.build_dir,
+ cli_args.json)
result = parse_tests(request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
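
Taken together, the kunit.py hunks above add a --json option and thread the build directory and JSON destination through KunitParseRequest. A minimal usage sketch, assuming the tool is driven through kunit.main() from the kernel tree root (the argv values are illustrative, not part of the patch):

import kunit

# No value given: nargs='?' with const='stdout' makes parse_tests()
# print the KernelCI-style JSON object to stdout.
kunit.main(['run', '--json'])

# A filename routes the same JSON into that file instead.
kunit.main(['run', '--json=results.json'])
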
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
new file mode 100644
index 000000000000..624b31b2dbd6
--- /dev/null
+++ b/tools/testing/kunit/kunit_json.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generates JSON from KUnit results according to
+# KernelCI spec: https://github.com/kernelci/kernelci-doc/wiki/Test-API
+#
+# Copyright (C) 2020, Google LLC.
+# Author: Heidi Fahim <heidifahim@google.com>
+
+import json
+import os
+
+import kunit_parser
+
+from kunit_parser import TestStatus
+
+def get_json_result(test_result, def_config, build_dir, json_path):
+ sub_groups = []
+
+ # Each test suite is mapped to a KernelCI sub_group
+ for test_suite in test_result.suites:
+ sub_group = {
+ "name": test_suite.name,
+ "arch": "UM",
+ "defconfig": def_config,
+ "build_environment": build_dir,
+ "test_cases": [],
+ "lab_name": None,
+ "kernel": None,
+ "job": None,
+ "git_branch": "kselftest",
+ }
+ test_cases = []
+ # TODO: Add attachments attribute in test_case with detailed
+ # failure message, see https://api.kernelci.org/schema-test-case.html#get
+ for case in test_suite.cases:
+ test_case = {"name": case.name, "status": "FAIL"}
+ if case.status == TestStatus.SUCCESS:
+ test_case["status"] = "PASS"
+ elif case.status == TestStatus.TEST_CRASHED:
+ test_case["status"] = "ERROR"
+ test_cases.append(test_case)
+ sub_group["test_cases"] = test_cases
+ sub_groups.append(sub_group)
+ test_group = {
+ "name": "KUnit Test Group",
+ "arch": "UM",
+ "defconfig": def_config,
+ "build_environment": build_dir,
+ "sub_groups": sub_groups,
+ "lab_name": None,
+ "kernel": None,
+ "job": None,
+ "git_branch": "kselftest",
+ }
+ json_obj = json.dumps(test_group, indent=4)
+ if json_path != 'stdout':
+ with open(json_path, 'w') as result_path:
+ result_path.write(json_obj)
+ root = __file__.split('tools/testing/kunit/')[0]
+ kunit_parser.print_with_timestamp(
+ "Test results stored in %s" %
+ os.path.join(root, result_path.name))
+ return json_obj
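
The helper above can also be exercised directly. A minimal sketch, assuming a previously captured KUnit log (the log path is hypothetical); with json_path='stdout' the function performs no file I/O and just returns the JSON string, which is exactly how the unit tests further down use it:

import kunit_json
import kunit_parser

with open('.kunit/kunit_run.log') as f:
    test_result = kunit_parser.parse_run_tests(f)

json_obj = kunit_json.get_json_result(
    test_result=test_result,
    def_config='kunit_defconfig',
    build_dir=None,
    json_path='stdout')
print(json_obj)
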
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index e20e2056cb38..b557b1e93f98 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -36,9 +36,9 @@ class LinuxSourceTreeOperations(object):
try:
subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
except OSError as e:
- raise ConfigError('Could not call make command: ' + e)
+ raise ConfigError('Could not call make command: ' + str(e))
except subprocess.CalledProcessError as e:
- raise ConfigError(e.output)
+ raise ConfigError(e.output.decode())
def make_olddefconfig(self, build_dir, make_options):
command = ['make', 'ARCH=um', 'olddefconfig']
@@ -49,22 +49,27 @@ class LinuxSourceTreeOperations(object):
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except OSError as e:
- raise ConfigError('Could not call make command: ' + e)
+ raise ConfigError('Could not call make command: ' + str(e))
except subprocess.CalledProcessError as e:
- raise ConfigError(e.output)
+ raise ConfigError(e.output.decode())
- def make_allyesconfig(self):
+ def make_allyesconfig(self, build_dir, make_options):
kunit_parser.print_with_timestamp(
'Enabling all CONFIGs for UML...')
+ command = ['make', 'ARCH=um', 'allyesconfig']
+ if make_options:
+ command.extend(make_options)
+ if build_dir:
+ command += ['O=' + build_dir]
process = subprocess.Popen(
- ['make', 'ARCH=um', 'allyesconfig'],
+ command,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT)
process.wait()
kunit_parser.print_with_timestamp(
'Disabling broken configs to run KUnit tests...')
with ExitStack() as es:
- config = open(KCONFIG_PATH, 'a')
+ config = open(get_kconfig_path(build_dir), 'a')
disable = open(BROKEN_ALLCONFIG_PATH, 'r').read()
config.write(disable)
kunit_parser.print_with_timestamp(
@@ -79,9 +84,9 @@ class LinuxSourceTreeOperations(object):
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except OSError as e:
- raise BuildError('Could not call execute make: ' + e)
+ raise BuildError('Could not call execute make: ' + str(e))
except subprocess.CalledProcessError as e:
- raise BuildError(e.output)
+ raise BuildError(e.output.decode())
def linux_bin(self, params, timeout, build_dir, outfile):
"""Runs the Linux UML binary. Must be named 'linux'."""
@@ -161,9 +166,9 @@ class LinuxSourceTree(object):
return self.build_config(build_dir, make_options)
def build_um_kernel(self, alltests, jobs, build_dir, make_options):
- if alltests:
- self._ops.make_allyesconfig()
try:
+ if alltests:
+ self._ops.make_allyesconfig(build_dir, make_options)
self._ops.make_olddefconfig(build_dir, make_options)
self._ops.make(jobs, build_dir, make_options)
except (ConfigError, BuildError) as e:
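
The str()/.decode() conversions above fix a latent TypeError: an OSError cannot be concatenated to a str, and CalledProcessError.output is bytes, so the original error reporting raised a second exception that masked the real failure. A self-contained illustration of the fixed pattern (ConfigError stands in for the class defined in kunit_kernel.py):

import subprocess

class ConfigError(Exception):
    pass

try:
    subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
except OSError as e:
    # 'str + OSError' itself raised TypeError before the fix; str(e)
    # preserves the underlying message.
    raise ConfigError('Could not call make command: ' + str(e))
except subprocess.CalledProcessError as e:
    # e.output is bytes; decode it so the report is a readable str.
    raise ConfigError(e.output.decode())
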
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 287c74d821c3..99c3c5671ea4 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -11,11 +11,13 @@ from unittest import mock
import tempfile, shutil # Handling test_tmpdir
+import json
import os
import kunit_config
import kunit_parser
import kunit_kernel
+import kunit_json
import kunit
test_tmpdir = ''
@@ -230,6 +232,37 @@ class KUnitParserTest(unittest.TestCase):
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual('kunit-resource-test', result.suites[0].name)
+class KUnitJsonTest(unittest.TestCase):
+
+ def _json_for(self, log_file):
+ with(open(get_absolute_path(log_file))) as file:
+ test_result = kunit_parser.parse_run_tests(file)
+ json_obj = kunit_json.get_json_result(
+ test_result=test_result,
+ def_config='kunit_defconfig',
+ build_dir=None,
+ json_path='stdout')
+ return json.loads(json_obj)
+
+ def test_failed_test_json(self):
+ result = self._json_for(
+ 'test_data/test_is_test_passed-failure.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'FAIL'},
+ result["sub_groups"][1]["test_cases"][0])
+
+ def test_crashed_test_json(self):
+ result = self._json_for(
+ 'test_data/test_is_test_passed-crash.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'ERROR'},
+ result["sub_groups"][1]["test_cases"][0])
+
+ def test_no_tests_json(self):
+ result = self._json_for(
+ 'test_data/test_is_test_passed-no_tests_run.log')
+ self.assertEqual(0, len(result['sub_groups']))
+
class StrContains(str):
def __eq__(self, other):
return self in other
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 9018f45d631d..d9c283503159 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -88,10 +88,10 @@ endif
# of the targets gets built.
FORCE_TARGETS ?=
-# Clear LDFLAGS and MAKEFLAGS if called from main
-# Makefile to avoid test build failures when test
-# Makefile doesn't have explicit build rules.
-ifeq (1,$(MAKELEVEL))
+# Clear LDFLAGS and MAKEFLAGS when implicit rules are missing. This provides
+# implicit rules to sub-test Makefiles which avoids build failures in test
+# Makefile that don't have explicit build rules.
+ifeq (,$(LINK.c))
override LDFLAGS =
override MAKEFLAGS =
endif
@@ -206,6 +206,7 @@ KSFT_INSTALL_PATH := $(abspath $(KSFT_INSTALL_PATH))
# Avoid changing the rest of the logic here and lib.mk.
INSTALL_PATH := $(KSFT_INSTALL_PATH)
ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
+TEST_LIST := $(INSTALL_PATH)/kselftest-list.txt
install: all
ifdef INSTALL_PATH
@@ -214,6 +215,8 @@ ifdef INSTALL_PATH
install -m 744 kselftest/module.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
+ install -m 744 run_kselftest.sh $(INSTALL_PATH)/
+ rm -f $(TEST_LIST)
@ret=1; \
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
@@ -222,33 +225,18 @@ ifdef INSTALL_PATH
ret=$$((ret * $$?)); \
done; exit $$ret;
- @# Ask all targets to emit their test scripts
- echo "#!/bin/sh" > $(ALL_SCRIPT)
- echo "BASE_DIR=\$$(realpath \$$(dirname \$$0))" >> $(ALL_SCRIPT)
- echo "cd \$$BASE_DIR" >> $(ALL_SCRIPT)
- echo ". ./kselftest/runner.sh" >> $(ALL_SCRIPT)
- echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
- echo "if [ \"\$$1\" = \"--summary\" ]; then" >> $(ALL_SCRIPT)
- echo " logfile=\$$BASE_DIR/output.log" >> $(ALL_SCRIPT)
- echo " cat /dev/null > \$$logfile" >> $(ALL_SCRIPT)
- echo "fi" >> $(ALL_SCRIPT)
- @# While building run_kselftest.sh skip also non-existent TARGET dirs:
+ @# Ask all targets to emit their test scripts
+ @# While building kselftest-list.text skip also non-existent TARGET dirs:
@# they could be the result of a build failure and should NOT be
@# included in the generated runlist.
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
[ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
- echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
- echo "cd $$TARGET" >> $(ALL_SCRIPT); \
- echo -n "run_many" >> $(ALL_SCRIPT); \
echo -n "Emit Tests for $$TARGET\n"; \
- $(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
- echo "" >> $(ALL_SCRIPT); \
- echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
+ $(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET COLLECTION=$$TARGET \
+ -C $$TARGET emit_tests >> $(TEST_LIST); \
done;
-
- chmod u+x $(ALL_SCRIPT)
else
$(error Error: set INSTALL_PATH to use install)
endif
diff --git a/tools/testing/selftests/firmware/.gitignore b/tools/testing/selftests/firmware/.gitignore
new file mode 100644
index 000000000000..62abc92a94c4
--- /dev/null
+++ b/tools/testing/selftests/firmware/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+fw_namespace
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index fcc281373b4d..c2a2a100114b 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -149,6 +149,26 @@ config_unset_into_buf()
echo 0 > $DIR/config_into_buf
}
+config_set_buf_size()
+{
+ echo $1 > $DIR/config_buf_size
+}
+
+config_set_file_offset()
+{
+ echo $1 > $DIR/config_file_offset
+}
+
+config_set_partial()
+{
+ echo 1 > $DIR/config_partial
+}
+
+config_unset_partial()
+{
+ echo 0 > $DIR/config_partial
+}
+
config_set_sync_direct()
{
echo 1 > $DIR/config_sync_direct
@@ -207,6 +227,35 @@ read_firmwares()
done
}
+read_partial_firmwares()
+{
+ if [ "$(cat $DIR/config_into_buf)" == "1" ]; then
+ fwfile="${FW_INTO_BUF}"
+ else
+ fwfile="${FW}"
+ fi
+
+ if [ "$1" = "xzonly" ]; then
+ fwfile="${fwfile}-orig"
+ fi
+
+ # Strip fwfile down to match partial offset and length
+ partial_data="$(cat $fwfile)"
+ partial_data="${partial_data:$2:$3}"
+
+ for i in $(seq 0 3); do
+ config_set_read_fw_idx $i
+
+ read_firmware="$(cat $DIR/read_firmware)"
+
+ # Verify the contents are what we expect.
+ if [ $read_firmware != $partial_data ]; then
+ echo "request #$i: partial firmware was not loaded" >&2
+ exit 1
+ fi
+ done
+}
+
read_firmwares_expect_nofile()
{
for i in $(seq 0 3); do
@@ -242,6 +291,21 @@ test_batched_request_firmware_into_buf_nofile()
echo "OK"
}
+test_request_partial_firmware_into_buf_nofile()
+{
+ echo -n "Test request_partial_firmware_into_buf() off=$1 size=$2 nofile: "
+ config_reset
+ config_set_name nope-test-firmware.bin
+ config_set_into_buf
+ config_set_partial
+ config_set_buf_size $2
+ config_set_file_offset $1
+ config_trigger_sync
+ read_firmwares_expect_nofile
+ release_all_firmware
+ echo "OK"
+}
+
test_batched_request_firmware_direct_nofile()
{
echo -n "Batched request_firmware_direct() nofile try #$1: "
@@ -356,6 +420,21 @@ test_request_firmware_nowait_custom()
echo "OK"
}
+test_request_partial_firmware_into_buf()
+{
+ echo -n "Test request_partial_firmware_into_buf() off=$1 size=$2: "
+ config_reset
+ config_set_name $TEST_FIRMWARE_INTO_BUF_FILENAME
+ config_set_into_buf
+ config_set_partial
+ config_set_buf_size $2
+ config_set_file_offset $1
+ config_trigger_sync
+ read_partial_firmwares normal $1 $2
+ release_all_firmware
+ echo "OK"
+}
+
# Only continue if batched request triggers are present on the
# test-firmware driver
test_config_present
@@ -383,6 +462,12 @@ for i in $(seq 1 5); do
test_request_firmware_nowait_custom $i normal
done
+# Partial loads cannot use fallback, so do not repeat tests.
+test_request_partial_firmware_into_buf 0 10
+test_request_partial_firmware_into_buf 0 5
+test_request_partial_firmware_into_buf 1 6
+test_request_partial_firmware_into_buf 2 10
+
# Test for file not found, errors are expected, the failure would be
# a hung task, which would require a hard reset.
echo
@@ -407,6 +492,12 @@ for i in $(seq 1 5); do
test_request_firmware_nowait_custom_nofile $i
done
+# Partial loads cannot use fallback, so do not repeat tests.
+test_request_partial_firmware_into_buf_nofile 0 10
+test_request_partial_firmware_into_buf_nofile 0 5
+test_request_partial_firmware_into_buf_nofile 1 6
+test_request_partial_firmware_into_buf_nofile 2 10
+
test "$HAS_FW_LOADER_COMPRESS" != "yes" && exit 0
# test with both files present
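
read_partial_firmwares() above validates a partial load by slicing the source file with the shell substring expansion ${partial_data:$2:$3}, i.e. offset then length. The same check, restated as a small Python sketch (names are illustrative):

def expected_partial(fw_bytes, offset, size):
    # Mirror of "${partial_data:$offset:$size}": the buffer read back
    # from the driver must equal this slice of the on-disk firmware.
    return fw_bytes[offset:offset + size]

assert expected_partial(b'ABCDEFGHIJ', 1, 6) == b'BCDEFG'
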
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 7a17ea815736..30848ca36555 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -47,9 +47,9 @@ ARCH ?= $(SUBARCH)
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
- make --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
+ $(MAKE) --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
else
- make --no-builtin-rules INSTALL_HDR_PATH=$$OUTPUT/usr \
+ $(MAKE) --no-builtin-rules INSTALL_HDR_PATH=$$OUTPUT/usr \
ARCH=$(ARCH) -C $(top_srcdir) headers_install
endif
endif
@@ -107,9 +107,8 @@ endif
emit_tests:
for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \
BASENAME_TEST=`basename $$TEST`; \
- echo " \\"; \
- echo -n " \"$$BASENAME_TEST\""; \
- done; \
+ echo "$(COLLECTION):$$BASENAME_TEST"; \
+ done
# define if isn't already. It is undefined in make O= case.
ifeq ($(RM),)
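
With the Makefile and lib.mk changes above, installation emits a plain kselftest-list.txt runlist instead of generating shell code into run_kselftest.sh: emit_tests now prints one COLLECTION:TEST pair per line. A sketch of consuming that format (file name per the TEST_LIST definition above; the sample entry is illustrative):

# kselftest-list.txt holds lines like 'timers:posix_timers'.
with open('kselftest-list.txt') as f:
    for line in f:
        collection, test = line.strip().split(':', 1)
        print(collection, test)
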
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 1aba83c87ad3..846c7ed71556 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -278,7 +278,7 @@ function check_result {
# help differentiate repeated testing runs. Remove them with a
# post-comparison sed filter.
- result=$(dmesg | comm -13 "$SAVED_DMESG" - | \
+ result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \
grep -e 'livepatch:' -e 'test_klp' | \
grep -v '\(tainting\|taints\) kernel' | \
sed 's/^\[[ 0-9.]*\] //')
diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
index 8383eb89d88a..bb7a1775307b 100755
--- a/tools/testing/selftests/lkdtm/run.sh
+++ b/tools/testing/selftests/lkdtm/run.sh
@@ -82,7 +82,7 @@ dmesg > "$DMESG"
($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
# Record and dump the results
-dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true
+dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
cat "$LOG"
# Check for expected output
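
Both the livepatch and lkdtm hunks above compare dmesg snapshots with comm --nocheck-order -13, which suppresses lines unique to the first input (-1) and lines common to both (-3), leaving only lines that appeared since the snapshot; --nocheck-order is needed because dmesg output is not lexically sorted. A rough Python analogue of the intent:

def new_dmesg_lines(saved, current):
    # Keep lines of the current log that were not in the snapshot,
    # approximating 'comm --nocheck-order -13 saved -'.
    seen = set(saved)
    return [line for line in current if line not in seen]
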
diff --git a/tools/testing/selftests/netfilter/.gitignore b/tools/testing/selftests/netfilter/.gitignore
new file mode 100644
index 000000000000..8448f74adfec
--- /dev/null
+++ b/tools/testing/selftests/netfilter/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+nf-queue
diff --git a/tools/testing/selftests/ptrace/.gitignore b/tools/testing/selftests/ptrace/.gitignore
index 7bebf9534a86..792318aaa30c 100644
--- a/tools/testing/selftests/ptrace/.gitignore
+++ b/tools/testing/selftests/ptrace/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
get_syscall_info
peeksiginfo
+vmaccess
diff --git a/tools/testing/selftests/run_kselftest.sh b/tools/testing/selftests/run_kselftest.sh
new file mode 100755
index 000000000000..609a4ef9300e
--- /dev/null
+++ b/tools/testing/selftests/run_kselftest.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run installed kselftest tests.
+#
+BASE_DIR=$(realpath $(dirname $0))
+cd $BASE_DIR
+TESTS="$BASE_DIR"/kselftest-list.txt
+if [ ! -r "$TESTS" ] ; then
+ echo "$0: Could not find list of tests to run ($TESTS)" >&2
+ available=""
+else
+ available="$(cat "$TESTS")"
+fi
+
+. ./kselftest/runner.sh
+ROOT=$PWD
+
+usage()
+{
+ cat <<EOF
+Usage: $0 [OPTIONS]
+ -s | --summary Print summary with detailed log in output.log
+ -t | --test COLLECTION:TEST Run TEST from COLLECTION
+ -c | --collection COLLECTION Run all tests from COLLECTION
+ -l | --list List the available collection:test entries
+ -d | --dry-run Don't actually run any tests
+ -h | --help Show this usage info
+EOF
+ exit $1
+}
+
+COLLECTIONS=""
+TESTS=""
+dryrun=""
+while true; do
+ case "$1" in
+ -s | --summary)
+ logfile="$BASE_DIR"/output.log
+ cat /dev/null > $logfile
+ shift ;;
+ -t | --test)
+ TESTS="$TESTS $2"
+ shift 2 ;;
+ -c | --collection)
+ COLLECTIONS="$COLLECTIONS $2"
+ shift 2 ;;
+ -l | --list)
+ echo "$available"
+ exit 0 ;;
+ -n | --dry-run)
+ dryrun="echo"
+ shift ;;
+ -h | --help)
+ usage 0 ;;
+ "")
+ break ;;
+ *)
+ usage 1 ;;
+ esac
+done
+
+# Add all selected collections to the explicit test list.
+if [ -n "$COLLECTIONS" ]; then
+ for collection in $COLLECTIONS ; do
+ found="$(echo "$available" | grep "^$collection:")"
+ if [ -z "$found" ] ; then
+ echo "No such collection '$collection'" >&2
+ exit 1
+ fi
+ TESTS="$TESTS $found"
+ done
+fi
+# Replace available test list with explicitly selected tests.
+if [ -n "$TESTS" ]; then
+ valid=""
+ for test in $TESTS ; do
+ found="$(echo "$available" | grep "^${test}$")"
+ if [ -z "$found" ] ; then
+ echo "No such test '$test'" >&2
+ exit 1
+ fi
+ valid="$valid $found"
+ done
+ available="$(echo "$valid" | sed -e 's/ /\n/g')"
+fi
+
+collections=$(echo "$available" | cut -d: -f1 | uniq)
+for collection in $collections ; do
+ [ -w /dev/kmsg ] && echo "kselftest: Running tests in $collection" >> /dev/kmsg
+ tests=$(echo "$available" | grep "^$collection:" | cut -d: -f2)
+ ($dryrun cd "$collection" && $dryrun run_many $tests)
+done
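
A few illustrative invocations of the new standalone runner, driven from Python here to keep the examples in one language (the collection and test names are hypothetical; options follow the usage() text above):

import subprocess

# List available collection:test entries, then dry-run one collection.
subprocess.run(['./run_kselftest.sh', '--list'])
subprocess.run(['./run_kselftest.sh', '--dry-run', '--collection', 'timers'])

# Run a single test, writing the summary log to output.log.
subprocess.run(['./run_kselftest.sh', '--summary', '--test', 'timers:posix_timers'])
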
diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
index 3ba674b64fa9..69dd0d1aa30b 100644
--- a/tools/testing/selftests/vm/config
+++ b/tools/testing/selftests/vm/config
@@ -3,3 +3,4 @@ CONFIG_USERFAULTFD=y
CONFIG_TEST_VMALLOC=m
CONFIG_DEVICE_PRIVATE=y
CONFIG_TEST_HMM=m
+CONFIG_GUP_BENCHMARK=y