[LTP] [PATCH v2] Test Description: Test shared memory behavior when multiple processes attach to a hugepage segment with different permissions. During these memory operations the reservation accounting of free hugepages for the parent and child processes can get out of sync.
Spoorthy
spoorthy@linux.ibm.com
Fri May 10 16:06:14 CEST 2024
Create a shared memory segment of segment_size (four hugepages) with 0640
permissions in the parent, attach it to the parent's address space and
partially initialize it by filling each of the four hugepage-sized parts of
the segment with a pattern (0x55). The segment is then detached from the
parent's address space.
Next, fork child processes in a loop. Each child reattaches to the shared
memory segment read-only by calling attach_segment() with the SHM_RDONLY
flag, reads from each hugepage, detaches from the segment with shmdt() and
exits. The test fails if any attach or detach fails, or if the free hugepage
count seen by a child does not match the count recorded by the parent, i.e.
the reservation accounting no longer adds up.
The parent waits for all child processes to exit. The test passes if every
child exits successfully and the parent's free hugepage count still matches
the value recorded before forking.
Tested and verified that the test case passes.
Signed-off-by: Spoorthy <spoorthy@linux.ibm.com>
-------
Changes in v2:
1) Addressed the make check errors
2) segment_size is no longer hard-coded; it is computed from the hugepage size in setup()
3) Added a check comparing the free hugepage counts between parent and children
-------
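For reviewers who want to try it, a minimal sketch of how to run just this
case after building LTP (paths assume a standard in-tree build; adjust to
your setup):

    # via the hugetlb runtest file, selecting only the new case
    ./runltp -f hugetlb -s hugeshmat06

    # or directly from the build tree
    cd testcases/kernel/mem/hugetlb/hugeshmat && ./hugeshmat06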
---
runtest/hugetlb | 1 +
testcases/kernel/mem/.gitignore | 1 +
.../mem/hugetlb/hugeshmat/hugeshmat06.c | 105 ++++++++++++++++++
3 files changed, 107 insertions(+)
create mode 100644 testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
diff --git a/runtest/hugetlb b/runtest/hugetlb
index 299c07ac9..240701b2b 100644
--- a/runtest/hugetlb
+++ b/runtest/hugetlb
@@ -44,6 +44,7 @@ hugeshmat02 hugeshmat02 -i 5
hugeshmat03 hugeshmat03 -i 5
hugeshmat04 hugeshmat04 -i 5
hugeshmat05 hugeshmat05 -i 5
+hugeshmat06 hugeshmat06
hugeshmctl01 hugeshmctl01 -i 5
hugeshmctl02 hugeshmctl02 -i 5
diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
index c96fe8bfc..4ad1dc313 100644
--- a/testcases/kernel/mem/.gitignore
+++ b/testcases/kernel/mem/.gitignore
@@ -39,6 +39,7 @@
/hugetlb/hugeshmat/hugeshmat03
/hugetlb/hugeshmat/hugeshmat04
/hugetlb/hugeshmat/hugeshmat05
+/hugetlb/hugeshmat/hugeshmat06
/hugetlb/hugeshmctl/hugeshmctl01
/hugetlb/hugeshmctl/hugeshmctl02
/hugetlb/hugeshmctl/hugeshmctl03
diff --git a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
new file mode 100644
index 000000000..8b669fb18
--- /dev/null
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005-2006, IBM Corporation.
+ * Author: David Gibson & Adam Litke
+ */
+/*\
+ * [Description]
+ *
+ * Test shared memory behavior when multiple processes attach to a hugepage
+ * segment with different permissions. The parent creates and initializes
+ * the segment, then children attach it read-only to check reservation accounting.
+ */
+
+#include "hugetlb.h"
+#include "tst_safe_sysv_ipc.h"
+
+#define SEGMENT_KEY (0x82ba15ff)
+#define MNTPOINT "hugetlbfs/"
+
+static int global_shmid = -1;
+static void *shmaddr;
+static long segment_size, hpage_size, stride;
+
+static int attach_segment(size_t segsize, int shmflags, int shmperms)
+{
+ int shmid;
+
+ shmid = SAFE_SHMGET(SEGMENT_KEY, segsize, shmflags);
+ shmaddr = SAFE_SHMAT(shmid, shmaddr, shmperms);
+ global_shmid = shmid;
+ return shmid;
+}
+
+static void setup(void)
+{
+ hpage_size = tst_get_hugepage_size();
+ segment_size = 4 * hpage_size;
+ stride = hpage_size;
+ if (hpage_size > segment_size)
+ tst_brk(TCONF, "Hugepage size is too large for the configured segment_size");
+}
+
+static void compare_free_hugepage_memory(long free_end, long free_start)
+{
+ if (free_end != free_start) {
+ tst_res(TFAIL, "Free hugepages after attach/detach (%ld) do not match the initial free hugepages (%ld)", free_end, free_start);
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void run_test(void)
+{
+ char *p;
+ int i, iterations;
+ pid_t *wait_list;
+ long total_hpages, free_start, free_end;
+
+ total_hpages = SAFE_READ_MEMINFO(MEMINFO_HPAGE_TOTAL);
+ iterations = (total_hpages * hpage_size) / segment_size + 1;
+ wait_list = SAFE_MALLOC(sizeof(pid_t) * iterations);
+ attach_segment(segment_size, IPC_CREAT | SHM_HUGETLB | 0640, 0);
+ p = (char *)shmaddr;
+ for (i = 0; i < 4; i++, p += stride)
+ memset(p, 0x55, stride);
+ free_start = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+ SAFE_SHMDT((const void *)shmaddr);
+ for (i = 0; i < iterations; i++) {
+ pid_t pid;
+
+ pid = SAFE_FORK();
+ if (pid) {
+ wait_list[i] = pid;
+ } else {
+ attach_segment(0, 0, SHM_RDONLY);
+ /* touch one byte in each hugepage of the read-only mapping */
+ for (i = 0; i < 4; i++)
+ (void)*((volatile char *)shmaddr + (i * hpage_size));
+ SAFE_SHMDT(((const void *)shmaddr));
+ free_end = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+ compare_free_hugepage_memory(free_end, free_start);
+ exit(EXIT_SUCCESS);
+ }
+ }
+ free_end = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+ compare_free_hugepage_memory(free_end, free_start);
+ tst_reap_children();
+ tst_res(TPASS, "Successfully tested shared memory behavior with multiple processes attached");
+}
+
+static void cleanup(void)
+{
+ if (global_shmid >= 0)
+ SAFE_SHMCTL(global_shmid, IPC_RMID, NULL);
+}
+
+static struct tst_test test = {
+ .needs_root = 1,
+ .mntpoint = MNTPOINT,
+ .needs_hugetlbfs = 1,
+ .needs_tmpdir = 1,
+ .forks_child = 1,
+ .setup = setup,
+ .cleanup = cleanup,
+ .test_all = run_test,
+ .hugepages = {32, TST_NEEDS},
+};
--
2.39.3