[LTP] [PATCH 5/8] Cleanup mmap18 test
Andrea Cervesato
andrea.cervesato@suse.de
Fri Feb 7 15:50:34 CET 2025
From: Andrea Cervesato <andrea.cervesato@suse.com>
Signed-off-by: Andrea Cervesato <andrea.cervesato@suse.com>
---
testcases/kernel/syscalls/mmap/mmap18.c | 96 ++++++++++++++++-----------------
1 file changed, 47 insertions(+), 49 deletions(-)
diff --git a/testcases/kernel/syscalls/mmap/mmap18.c b/testcases/kernel/syscalls/mmap/mmap18.c
index b37b29890ca009ea671b29e81e02fc1e42b44dbb..d920a50ae33badfb005143eb06c5aad17c1e72a3 100644
--- a/testcases/kernel/syscalls/mmap/mmap18.c
+++ b/testcases/kernel/syscalls/mmap/mmap18.c
@@ -1,74 +1,72 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) Zilogic Systems Pvt. Ltd., 2020
- * Email: code@zilogic.com
+ * Email: code@zilogic.com
+ * Copyright (C) 2025 SUSE LLC Andrea Cervesato <andrea.cervesato@suse.com>
*/
-/*
- * Test mmap() MAP_GROWSDOWN flag
+/*\
+ * [Description]
+ *
+ * Verify mmap() syscall using MAP_GROWSDOWN flag.
*
- * # Test1:
+ * [Algorithm]
*
- * We assign the memory region partially allocated with MAP_GROWSDOWN flag to
- * a thread as a stack and expect the mapping to grow when we touch the
- * guard page by calling a recusive function in the thread that uses the
- * growable mapping as a stack.
+ * **Test 1**
*
- * The kernel only grows the memory region when the stack pointer is within
- * guard page when the guard page is touched so simply faulting the guard
- * page will not cause the mapping to grow.
+ * We assign the memory region partially allocated with MAP_GROWSDOWN flag to
+ * a thread as a stack and expect the mapping to grow when we touch the
+ * guard page by calling a recursive function in the thread that uses the
+ * growable mapping as a stack.
*
- * Newer kernels does not allow a MAP_GROWSDOWN mapping to grow closer than
- * 'stack_guard_gap' pages to an existing mapping. So when we map the stack we
- * make sure there is enough of free address space before the lowest stack
- * address.
+ * The kernel only grows the memory region when the stack pointer is within
+ * the guard page when it is touched, so simply faulting the guard
+ * page will not cause the mapping to grow.
*
- * Kernel default 'stack_guard_gap' size is '256 * getpagesize()'.
+ * Newer kernels do not allow a MAP_GROWSDOWN mapping to grow closer than
+ * 'stack_guard_gap' pages to an existing mapping. So when we map the stack we
+ * make sure there is enough free address space before the lowest stack
+ * address.
*
- * The stack memory map would look like:
+ * Kernel default `stack_guard_gap` size is `256 * getpagesize()`.
*
- * | - - - reserved size - - - |
+ * The stack memory map would look like:
*
- * +-- - - - --+------------+-------------+
- * | 256 pages | unmapped | mapped |
- * +-- - - - --+------------+-------------+
- * | mapped size |
- * ^ | - - stack size - - |
- * start
- * ^ ^
- * stack bottom stack top
+ * | - - - reserved size - - - |
*
- * # Test2:
+ * +-- - - - --+------------+-------------+
+ * | 256 pages | unmapped | mapped |
+ * +-- - - - --+------------+-------------+
+ * | mapped size |
+ * ^ | - - stack size - - |
+ * start
+ * ^ ^
+ * stack bottom stack top
*
- * We allocate stack as we do in the first test but we mmap a page in the
- * space the stack is supposed to grow into and we expect the thread to
- * segfault when the guard page is faulted.
+ * **Test 2**
+ *
+ * We allocate stack as we do in the first test but we mmap a page in the
+ * space the stack is supposed to grow into and we expect the thread to
+ * segfault when the guard page is faulted.
*/
-#include <unistd.h>
#include <pthread.h>
-#include <sys/mman.h>
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <stdlib.h>
-#include <stdbool.h>
-
#include "tst_test.h"
#include "tst_safe_pthread.h"
static long page_size;
-static bool __attribute__((noinline)) check_stackgrow_up(void)
+static bool __attribute_noinline__ check_stackgrow_up(void)
{
char local_var;
static char *addr;
- if (!addr) {
- addr = &local_var;
- return check_stackgrow_up();
- }
+ if (!addr) {
+ addr = &local_var;
+ return check_stackgrow_up();
+ }
- return (addr < &local_var);
+ return (addr < &local_var);
}
static void setup(void)
@@ -90,7 +88,7 @@ static void *allocate_stack(size_t stack_size, size_t mapped_size)
long reserved_size = 256 * page_size + stack_size;
start = SAFE_MMAP(NULL, reserved_size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
SAFE_MUNMAP(start, reserved_size);
SAFE_MMAP((start + reserved_size - mapped_size), mapped_size, PROT_READ | PROT_WRITE,
@@ -103,12 +101,12 @@ static void *allocate_stack(size_t stack_size, size_t mapped_size)
tst_res(TINFO, "start = %p, stack_top = %p, stack bottom = %p",
start, stack_top, stack_bottom);
tst_res(TINFO, "mapped pages %zu, stack pages %zu",
- mapped_size/page_size, stack_size/page_size);
+ mapped_size/page_size, stack_size/page_size);
return stack_bottom;
}
-static __attribute__((noinline)) void *check_depth_recursive(void *limit)
+static __attribute_noinline__ void *check_depth_recursive(void *limit)
{
if ((off_t) &limit < (off_t) limit) {
tst_res(TINFO, "&limit = %p, limit = %p", &limit, limit);
@@ -192,10 +190,10 @@ static void grow_stack_fail(size_t stack_size, size_t mapped_size)
}
SAFE_WAIT(&wstatus);
- if (WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGSEGV)
+ if (WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGSEGV)
tst_res(TPASS, "Child killed by %s as expected", tst_strsig(SIGSEGV));
- else
- tst_res(TFAIL, "Child: %s", tst_strstatus(wstatus));
+ else
+ tst_res(TFAIL, "Child: %s", tst_strstatus(wstatus));
}
static void run_test(void)
--
2.43.0
More information about the ltp
mailing list