author     Ben Hutchings <ben@decadent.org.uk>     2015-11-02 12:22:22 +0000
committer  Shuah Khan <shuahkh@osg.samsung.com>    2015-11-03 16:55:02 -0700
commit     ee00479d6702393bf5eda94775349a2e28ba78fa
tree       827a6acbec8d25be8fe245a9e41faede53f80a10 /tools
parent     3b4d3819eca5787bae77314851a799ecbf0da02b
download   linux-ee00479d6702393bf5eda94775349a2e28ba78fa.tar.bz2
selftests: vm: Try harder to allocate huge pages
If we need to increase the number of huge pages, drop caches first
to reduce fragmentation and then check that we actually allocated
as many as we wanted. Retry once if that doesn't work.
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Shuah Khan <shuahkh@osg.samsung.com>
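The sequence the patch automates can also be reproduced by hand. Below is a minimal, illustrative sketch of that sequence, assuming root and a kernel with hugetlbfs support; the target of 256 pages is made up for the example, and awk is used here for brevity where the script itself reads /proc/meminfo in a shell loop:

# Illustrative only: grow the huge page pool by hand, mirroring what the
# patch makes run_vmtests do.  Must be run as root.
needpgs=256                                   # made-up target for the example
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
freepgs=$(awk '/HugePages_Free:/ {print $2}' /proc/meminfo)
if [ "$freepgs" -lt "$needpgs" ]; then
	echo 3 > /proc/sys/vm/drop_caches     # drop caches to reduce fragmentation
	echo $(( needpgs - freepgs + nr_hugepgs )) > /proc/sys/vm/nr_hugepages
	freepgs=$(awk '/HugePages_Free:/ {print $2}' /proc/meminfo)
fi
echo "free huge pages: $freepgs (wanted $needpgs)"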
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/testing/selftests/vm/run_vmtests  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 9179ce8df485..97ed1b26293f 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -20,13 +20,26 @@ done < /proc/meminfo
 if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then
 	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
 	needpgs=`expr $needmem / $pgsize`
-	if [ $freepgs -lt $needpgs ]; then
+	tries=2
+	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
 		lackpgs=$(( $needpgs - $freepgs ))
+		echo 3 > /proc/sys/vm/drop_caches
 		echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
 		if [ $? -ne 0 ]; then
 			echo "Please run this test as root"
 			exit 1
 		fi
+		while read name size unit; do
+			if [ "$name" = "HugePages_Free:" ]; then
+				freepgs=$size
+			fi
+		done < /proc/meminfo
+		tries=$((tries - 1))
+	done
+	if [ $freepgs -lt $needpgs ]; then
+		printf "Not enough huge pages available (%d < %d)\n" \
+			$freepgs $needpgs
+		exit 1
 	fi
 else
 	echo "no hugetlbfs support in kernel?"
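One plausible way to exercise the change is to compare the hugepage counters before and after a run; the build step and paths below follow the usual selftest layout and are not part of this patch:

# Assumes the vm selftest binaries have already been built
# (e.g. make -C tools/testing/selftests/vm).
cd tools/testing/selftests/vm
grep -E 'HugePages_(Total|Free)' /proc/meminfo
sudo ./run_vmtests
grep -E 'HugePages_(Total|Free)' /proc/meminfo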