[LTP] [PATCH] hugemmap34: change how the test finds a suitable huge page and stack
Jan Stancek
jstancek@redhat.com
Thu Dec 12 10:29:24 CET 2024
The test tries to find a place for a huge page by calling
mmap(MAP_FIXED_NOREPLACE), starting at the top of the stack and moving
downwards until the call succeeds. The problem is that before it finds
such a place, it walks over other existing mappings. So the child process
does crash when trying to extend its stack, but not because it hit the
mapped huge page; it hit one of those existing mappings. This search is
also slow, especially on -debug kernels.
This patch changes the test by giving the child process a separate stack
that is always immediately above a huge page. Setup temporarily allocates
huge pages and uses their addresses to map potential child stacks. Once it
allocates a huge page and finds that the mapping right above it is one of
the child stacks, it uses that pair for the test.
Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
.../kernel/mem/hugetlb/hugemmap/hugemmap34.c | 151 ++++++++++++++----
1 file changed, 118 insertions(+), 33 deletions(-)
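Note (not part of the patch): the test now depends on running the child on
a caller-provided stack. Below is a minimal standalone sketch of that
mechanism; it uses the glibc clone() wrapper rather than LTP's ltp_clone(),
and places the stack at an arbitrary address instead of directly above a
huge page:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>

#define STACK_SZ (2*1024*1024)

static int child_fn(void *arg)
{
	/* runs on the mmap'ed stack below, not on a copy of the parent's */
	printf("child stack is near %p\n", (void *)&arg);
	return 0;
}

int main(void)
{
	/* memory that will serve as the child's stack */
	char *stack = mmap(NULL, STACK_SZ, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pid;

	if (stack == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* clone() takes the highest address: the stack grows downwards */
	pid = clone(child_fn, stack + STACK_SZ,
		    CLONE_VM | CLONE_VFORK | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	return 0;
}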
diff --git a/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c
index a7a88fbb2d9e..91b070a8462e 100644
--- a/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c
+++ b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c
@@ -20,25 +20,32 @@
* huge page-only segment -- resulting in bugs.
*/
+#define _GNU_SOURCE
#include "lapi/mmap.h"
#include "hugetlb.h"
#include <errno.h>
+#include <inttypes.h>
+#include <sched.h>
#ifdef __LP64__
#define STACK_ALLOCATION_SIZE (256*1024*1024)
#else
#define STACK_ALLOCATION_SIZE (16*1024*1024)
#endif
-#define PALIGN(p, a) ((void *)LTP_ALIGN((unsigned long)(p), (a)))
#define MNTPOINT "hugetlbfs/"
-static int fd = -1;
-static unsigned long long hpage_size;
-static int page_size;
+#define PATH_HUGEPAGE "/sys/kernel/mm/hugepages"
+
+#define STACKS_MAX 64
+static int fd = -1;
+static uintptr_t hpage_size, stacks[STACKS_MAX];
+static int stacks_num;
+static void *hpage_addr, *stack_addr;
+static void **shared_area;
-void do_child(void *stop_address)
+int do_child(void *stop_address)
{
- volatile int *x;
+ volatile char *x;
/* corefile from this process is not interesting and limiting
* its size can save a lot of time. '1' is a special value,
@@ -46,57 +53,77 @@ void do_child(void *stop_address)
* sets limit to RLIM_INFINITY.
*/
tst_no_corefile(1);
+ tst_res(TINFO, "Child process starting with top of stack at %p", &x);
do {
x = alloca(STACK_ALLOCATION_SIZE);
+ *shared_area = (void *)x;
*x = 1;
} while ((void *)x >= stop_address);
+ exit(0);
}
static void run_test(void)
{
int pid, status;
- void *stack_address, *mmap_address, *heap_address, *map;
- stack_address = alloca(0);
- heap_address = sbrk(0);
-
- /*
- * paranoia: start mapping two hugepages below the start of the stack,
- * in case the alignment would cause us to map over something if we
- * only used a gap of one hugepage.
- */
- mmap_address = PALIGN(stack_address - 2 * hpage_size, hpage_size);
- do {
- map = mmap(mmap_address, hpage_size, PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_FIXED_NOREPLACE, fd, 0);
- if (map == MAP_FAILED) {
- if (errno == ENOMEM) {
- tst_res(TCONF, "There is no enough memory in the system to do mmap");
- return;
- }
- }
- mmap_address -= hpage_size;
- /*
- * if we get all the way down to the heap, stop trying
- */
- } while (mmap_address <= heap_address);
- pid = SAFE_FORK();
+ pid = ltp_clone(CLONE_VM | CLONE_VFORK | SIGCHLD, do_child,
+ hpage_addr, hpage_size, stack_addr);
-	if (pid == 0)
-		do_child(mmap_address);
+	if (pid < 0)
+		tst_brk(TBROK | TERRNO, "clone failed");
SAFE_WAITPID(pid, &status, 0);
+	tst_res(TINFO, "Child process extended stack down to %p, write to %p failed",
+		*shared_area + STACK_ALLOCATION_SIZE, *shared_area);
if (WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV)
tst_res(TPASS, "Child killed by %s as expected", tst_strsig(SIGSEGV));
else
tst_res(TFAIL, "Child: %s", tst_strstatus(status));
}
+/* Return the start address of the first mapping above addr, or 0 if none */
+static uintptr_t get_next_mapping_start(uintptr_t addr)
+{
+ FILE *fp = fopen("/proc/self/maps", "r");
+
+ if (fp == NULL)
+ tst_brk(TBROK | TERRNO, "Failed to open /proc/self/maps.");
+
+	for (;;) {
+		uintptr_t start, end;
+		int ret;
+
+		ret = fscanf(fp, "%"SCNxPTR"-%"SCNxPTR" %*[^\n]\n", &start, &end);
+		if (ret == EOF)
+			break;
+		if (ret != 2) {
+			fclose(fp);
+			tst_brk(TBROK, "Couldn't parse /proc/self/maps line.");
+		}
+
+		if (start > addr) {
+			fclose(fp);
+			return start;
+		}
+	}
+ fclose(fp);
+ return 0;
+}
+
+static int is_addr_in_stacks(uintptr_t addr)
+{
+ int i;
+
+ for (i = 0; i < stacks_num; i++) {
+ if (addr == stacks[i])
+ return 1;
+ }
+ return 0;
+}
+
void setup(void)
{
struct rlimit r;
+ int i;
- page_size = getpagesize();
hpage_size = tst_get_hugepage_size();
/*
* Setting the stack size to unlimited.
@@ -107,7 +134,65 @@ void setup(void)
SAFE_GETRLIMIT(RLIMIT_STACK, &r);
if (r.rlim_cur != RLIM_INFINITY)
tst_brk(TCONF, "Stack rlimit must be 'unlimited'");
+
+ if (access(PATH_HUGEPAGE, F_OK))
+ tst_brk(TCONF, "hugetlbfs is not supported");
+
fd = tst_creat_unlinked(MNTPOINT, 0);
+
+	/* shared memory used to pass a (void *) back from the child process */
+ shared_area = SAFE_MMAP(0, getpagesize(), PROT_READ|PROT_WRITE,
+ MAP_SHARED|MAP_ANONYMOUS, -1, 0);
+
+	/*
+	 * We search for potential stack addresses by looking at places
+	 * where the kernel would map the next huge page, and by occupying
+	 * each such address with a potential stack mapping. When a huge
+	 * page lands so that the next mapping above it is one of our
+	 * stack mappings, we use that pair for the test. The huge pages
+	 * are mapped in a child process so that slices in the parent
+	 * are not affected.
+	 */
+ tst_res(TINFO, "searching for huge page and child stack placement");
+ for (i = 0; i < STACKS_MAX; i++) {
+ uintptr_t next_start;
+ int pid, status;
+
+ pid = SAFE_FORK();
+ if (pid == 0) {
+ *shared_area = SAFE_MMAP(0, hpage_size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE, fd, 0);
+ SAFE_MUNMAP(*shared_area, hpage_size);
+ exit(0);
+ }
+ SAFE_WAITPID(pid, &status, 0);
+ if (status != 0)
+ tst_brk(TFAIL, "Child: %s", tst_strstatus(status));
+
+ next_start = get_next_mapping_start((uintptr_t)*shared_area);
+ if (is_addr_in_stacks(next_start)) {
+ stack_addr = (void *)next_start;
+ hpage_addr = *shared_area;
+ break;
+ }
+
+ tst_res(TINFO, "potential stack at address %p", *shared_area);
+ stacks[stacks_num++] = (uintptr_t) SAFE_MMAP(*shared_area, hpage_size,
+ PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE|MAP_GROWSDOWN, -1, 0);
+ }
+
+	if (!stack_addr)
+		tst_brk(TCONF, "failed to find a good place for huge page and stack");
+
+ hpage_addr = mmap(hpage_addr, hpage_size, PROT_READ|PROT_WRITE,
+ MAP_SHARED|MAP_FIXED, fd, 0);
+ if (hpage_addr == MAP_FAILED) {
+ if (errno == ENOMEM)
+ tst_brk(TCONF, "Not enough memory.");
+ tst_brk(TBROK|TERRNO, "mmap failed");
+ }
+ tst_res(TINFO, "huge page is at address %p", hpage_addr);
+	tst_res(TINFO, "stack is at address %p", stack_addr);
}
void cleanup(void)
--
2.43.0