[LTP] [PATCH v7 3/4] Hugetlb: Migrating libhugetlbfs chunk-overcommit
Tarun Sahu
tsahu@linux.ibm.com
Fri Nov 4 07:27:15 CET 2022
Migrating the libhugetlbfs/testcases/chunk-overcommit.c test
Test Description: Some kernel versions, after hugepage demand allocation
was added, used a dubious heuristic to check whether there was enough
hugepage space available for a given mapping. The number of
not-already-instantiated pages in the mapping was compared against the
total hugepage free pool. It was very easy to confuse this heuristic into
overcommitting by allocating hugepage memory in chunks, each smaller than
the total available pool but together larger than it. This would generally
lead to OOM SIGKILLs of one process or another when it tried to
instantiate pages beyond the available pool.
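For context, the pattern the test exercises can be sketched as a small
standalone program roughly like the one below. The hugepage size, free-pool
size and hugetlbfs mount path in the sketch are illustrative assumptions
only; the test itself reads the real values from /proc/meminfo and uses the
LTP-provided mount point.

/* Illustration only (not part of the patch): two hugetlbfs mappings, each
 * smaller than the free pool but together larger than it.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long hpage = 2L * 1024 * 1024;		/* assume 2MB hugepages */
	long free_pages = 4;			/* assume 4 free hugepages */
	long chunk1 = free_pages / 2 + 1;
	long chunk2 = free_pages - chunk1 + 1;	/* chunk1 + chunk2 > free_pages */
	void *p, *q;
	/* assumed mount path, for illustration only */
	int fd = open("/mnt/hugetlbfs/demo", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;

	/* Each chunk alone fits in the free pool, so the old heuristic lets
	 * both mappings through; strict-accounting kernels instead fail the
	 * second mmap() with ENOMEM (unless overcommit is allowed).
	 */
	p = mmap(NULL, chunk1 * hpage, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, 0);
	q = mmap(NULL, chunk2 * hpage, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, chunk1 * hpage);

	/* On a buggy kernel, faulting in both mappings exceeds the pool and
	 * the process is SIGKILLed here.
	 */
	if (p != MAP_FAILED)
		memset(p, 0, chunk1 * hpage);
	if (q != MAP_FAILED)
		memset(q, 0, chunk2 * hpage);

	close(fd);
	return 0;
}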
Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com>
---
runtest/hugetlb | 1 +
testcases/kernel/mem/.gitignore | 1 +
.../kernel/mem/hugetlb/hugemmap/hugemmap08.c | 146 ++++++++++++++++++
3 files changed, 148 insertions(+)
create mode 100644 testcases/kernel/mem/hugetlb/hugemmap/hugemmap08.c
diff --git a/runtest/hugetlb b/runtest/hugetlb
index f7ff81cb3..664f18827 100644
--- a/runtest/hugetlb
+++ b/runtest/hugetlb
@@ -4,6 +4,7 @@ hugemmap04 hugemmap04
hugemmap05 hugemmap05
hugemmap06 hugemmap06
hugemmap07 hugemmap07
+hugemmap08 hugemmap08
hugemmap05_1 hugemmap05 -m
hugemmap05_2 hugemmap05 -s
hugemmap05_3 hugemmap05 -s -m
diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
index df5256ec8..003ce422b 100644
--- a/testcases/kernel/mem/.gitignore
+++ b/testcases/kernel/mem/.gitignore
@@ -5,6 +5,7 @@
/hugetlb/hugemmap/hugemmap05
/hugetlb/hugemmap/hugemmap06
/hugetlb/hugemmap/hugemmap07
+/hugetlb/hugemmap/hugemmap08
/hugetlb/hugeshmat/hugeshmat01
/hugetlb/hugeshmat/hugeshmat02
/hugetlb/hugeshmat/hugeshmat03
diff --git a/testcases/kernel/mem/hugetlb/hugemmap/hugemmap08.c b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap08.c
new file mode 100644
index 000000000..3efabc4aa
--- /dev/null
+++ b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap08.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * Copyright (C) 2005-2006 David Gibson & Adam Litke, IBM Corporation.
+ * Author: David Gibson & Adam Litke
+ */
+
+/*\
+ * [Description]
+ *
+ * Some kernel versions, after hugepage demand allocation was added, used
+ * a dubious heuristic to check whether there was enough hugepage space
+ * available for a given mapping. The number of not-already-instantiated
+ * pages in the mapping was compared against the total hugepage free pool.
+ * It was very easy to confuse this heuristic into overcommitting by
+ * allocating hugepage memory in chunks, each smaller than the total
+ * available pool but together larger than it. This would generally lead
+ * to OOM SIGKILLs of one process or another when it tried to instantiate
+ * pages beyond the available pool.
+ *
+ * HISTORY
+ *
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <limits.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+
+#include "hugetlb.h"
+
+#define MNTPOINT "hugetlbfs/"
+#define WITH_OVERCOMMIT 0
+#define WITHOUT_OVERCOMMIT 1
+
+static long hpage_size;
+static int huge_fd = -1;
+
+static void test_chunk_overcommit(void)
+{
+	unsigned long totpages, chunk1, chunk2;
+	void *p, *q;
+	pid_t child;
+	int status;
+
+	totpages = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+
+	chunk1 = (totpages / 2) + 1;
+	chunk2 = totpages - chunk1 + 1;
+
+	tst_res(TINFO, "Free: %lu hugepages available: "
+		"chunk1=%lu chunk2=%lu", totpages, chunk1, chunk2);
+
+	p = SAFE_MMAP(NULL, chunk1*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED,
+		huge_fd, 0);
+
+	q = mmap(NULL, chunk2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED,
+		huge_fd, chunk1*hpage_size);
+	if (q == MAP_FAILED) {
+		if (errno != ENOMEM) {
+			tst_res(TFAIL | TERRNO, "mmap() chunk2");
+			goto cleanup1;
+		} else {
+			tst_res(TPASS, "Successful without overcommit pages");
+			goto cleanup1;
+		}
+	}
+
+ tst_res(TINFO, "Looks like we've overcommitted, testing...");
+ /* Looks like we're overcommited, but we need to confirm that
+ * this is bad. We touch it all in a child process because an
+ * overcommit will generally lead to a SIGKILL which we can't
+ * handle, of course.
+ */
+ child = SAFE_FORK();
+
+ if (child == 0) {
+ memset(p, 0, chunk1*hpage_size);
+ memset(q, 0, chunk2*hpage_size);
+ exit(0);
+ }
+
+ SAFE_WAITPID(child, &status, 0);
+
+ if (WIFSIGNALED(status)) {
+ tst_res(TFAIL, "Killed by signal '%s' due to overcommit",
+ tst_strsig(WTERMSIG(status)));
+ goto cleanup2;
+ }
+
+ tst_res(TPASS, "Successful with overcommit pages");
+
+cleanup2:
+ SAFE_MUNMAP(q, chunk2*hpage_size);
+
+cleanup1:
+ SAFE_MUNMAP(p, chunk1*hpage_size);
+ SAFE_FTRUNCATE(huge_fd, 0);
+}
+
+static void run_test(unsigned int test_type)
+{
+	switch (test_type) {
+	case WITHOUT_OVERCOMMIT:
+		tst_res(TINFO, "Without overcommit testing...");
+		SAFE_FILE_PRINTF(PATH_OC_HPAGES, "%d", 0);
+		break;
+	case WITH_OVERCOMMIT:
+		tst_res(TINFO, "With overcommit testing...");
+		SAFE_FILE_PRINTF(PATH_OC_HPAGES, "%d", 2);
+		break;
+	}
+	test_chunk_overcommit();
+}
+
+static void setup(void)
+{
+	hpage_size = SAFE_READ_MEMINFO(MEMINFO_HPAGE_SIZE)*1024;
+	huge_fd = tst_creat_unlinked(MNTPOINT);
+}
+
+static void cleanup(void)
+{
+	SAFE_CLOSE(huge_fd);
+}
+
+static struct tst_test test = {
+	.needs_root = 1,
+	.mntpoint = MNTPOINT,
+	.needs_hugetlbfs = 1,
+	.forks_child = 1,
+	.save_restore = (const struct tst_path_val[]) {
+		{PATH_OC_HPAGES, NULL},
+		{}
+	},
+	.tcnt = 2,
+	.setup = setup,
+	.cleanup = cleanup,
+	.test = run_test,
+	.hugepages = {3, TST_NEEDS},
+};
+
--
2.31.1