[LTP] [PATCH 18/29] Hugetlb: Migrating libhugetlbfs mmap-cow

Tarun Sahu tsahu@linux.ibm.com
Sun Oct 16 14:57:20 CEST 2022


Migrating the libhugetlbfs/testcases/mmap-cow.c test

Test Description: Tests copy-on-write semantics of large pages where a
number of threads map the same file with the MAP_PRIVATE flag. The threads
then write into their copy of the mapping and recheck the contents to
ensure they were not corrupted by the other threads.

Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com>
---
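For reviewers, a minimal standalone sketch of the copy-on-write behaviour
exercised here (not part of this patch; it assumes hugetlbfs is mounted at a
hypothetical /mnt/huge, a 2 MiB default hugepage size, and at least two free
hugepages):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SZ (2UL * 1024 * 1024)	/* assumed hugepage size */

int main(void)
{
	/* hypothetical mount point and file name, for illustration only */
	int fd = open("/mnt/huge/cow-demo", O_CREAT | O_RDWR, 0600);
	char *shr, *prv;

	if (fd < 0)
		return 1;
	unlink("/mnt/huge/cow-demo");

	/* shared mapping: writes land in the hugepage backing the file */
	shr = mmap(NULL, HPAGE_SZ, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* private mapping of the same range: a write triggers copy-on-write */
	prv = mmap(NULL, HPAGE_SZ, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (shr == MAP_FAILED || prv == MAP_FAILED)
		return 1;

	memset(shr, 'S', HPAGE_SZ);	/* populate the file's hugepage */
	memset(prv, 'P', HPAGE_SZ);	/* COW gives this mapping its own hugepage */

	/* the private write must not be visible through the shared mapping */
	printf("shared: %c, private: %c\n", shr[0], prv[0]);
	return !(shr[0] == 'S' && prv[0] == 'P');
}

The test below performs the same check with several forked children, each
verifying its private copy against the unique pattern it wrote.
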
 runtest/hugetlb                               |   1 +
 testcases/kernel/mem/.gitignore               |   1 +
 .../kernel/mem/hugetlb/hugemmap/hugemmap21.c  | 222 ++++++++++++++++++
 3 files changed, 224 insertions(+)
 create mode 100644 testcases/kernel/mem/hugetlb/hugemmap/hugemmap21.c

diff --git a/runtest/hugetlb b/runtest/hugetlb
index 2dffa8421..449fad56a 100644
--- a/runtest/hugetlb
+++ b/runtest/hugetlb
@@ -22,6 +22,7 @@ hugemmap17 hugemmap17
 hugemmap18 hugemmap18
 hugemmap19 hugemmap19
 hugemmap20 hugemmap20
+hugemmap21 hugemmap21 -T 2 -s 5
 hugemmap05_1 hugemmap05 -m
 hugemmap05_2 hugemmap05 -s
 hugemmap05_3 hugemmap05 -s -m
diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
index dfd372892..74edfa392 100644
--- a/testcases/kernel/mem/.gitignore
+++ b/testcases/kernel/mem/.gitignore
@@ -21,6 +21,7 @@
 /hugetlb/hugemmap/hugemmap18
 /hugetlb/hugemmap/hugemmap19
 /hugetlb/hugemmap/hugemmap20
+/hugetlb/hugemmap/hugemmap21
 /hugetlb/hugeshmat/hugeshmat01
 /hugetlb/hugeshmat/hugeshmat02
 /hugetlb/hugeshmat/hugeshmat03
diff --git a/testcases/kernel/mem/hugetlb/hugemmap/hugemmap21.c b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap21.c
new file mode 100644
index 000000000..adc76df44
--- /dev/null
+++ b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap21.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * Copyright (C) 2005-2006 David Gibson & Adam Litke, IBM Corporation.
+ *
+ * Test Name: mmap COW
+ *
+ * Test Description: Tests copy-on-write semantics of large pages where a
+ * number of threads map the same file with the MAP_PRIVATE flag. The threads
+ * then write into their copy of the mapping and recheck the contents to
+ * ensure they were not corrupted by the other threads.
+ *
+ * HISTORY
+ *  Written by David Gibson & Adam Litke
+ *
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <sys/mount.h>
+#include <limits.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <sys/wait.h>
+
+#include "hugetlb.h"
+
+#define BUF_SZ 256
+
+#define CHILD_FAIL(thread, fmt, ...) \
+	do { \
+		if (verbose) \
+			tst_res(TINFO|TERRNO, "Thread %d (pid=%d) FAIL: " fmt, \
+			       thread, getpid(), ##__VA_ARGS__); \
+		exit(1); \
+	} while (0)
+
+/* Setup Configuration */
+static int nr_hugepages;	/* Number of huge pages to allocate */
+static int threads;	/* Number of threads to run */
+static char *threads_opt;
+static char *verbose;
+static char hfile[MAXPATHLEN];
+static int fd = -1;
+static long hpage_size;
+
+static int mmap_file(int fd, char **addr, size_t size, int type)
+{
+	/* SAFE_MMAP() is deliberately not used: an mmap() failure is
+	 * reported as a test failure rather than a broken test.
+	 */
+	*addr = mmap(NULL, size, PROT_READ|PROT_WRITE, type, fd, 0);
+	if (*addr == MAP_FAILED)
+		return -1;
+
+	return 0;
+}
+
+static void do_work(int thread, size_t size, int fd)
+{
+	char *addr;
+	size_t i;
+	char pattern = 'A' + thread;
+
+	if (mmap_file(fd, &addr, size, MAP_PRIVATE))
+		CHILD_FAIL(thread, "mmap() failed");
+
+	if (verbose)
+		tst_res(TINFO, "Thread %d (pid=%d): Mapped at address %p",
+		       thread, getpid(), addr);
+
+	/* Write to the mapping with a distinct pattern */
+	if (verbose)
+		tst_res(TINFO, "Thread %d (pid=%d): Writing %c to the mapping",
+		       thread, getpid(), pattern);
+	for (i = 0; i < size; i++)
+		addr[i] = pattern;
+
+	if (msync(addr, size, MS_SYNC))
+		CHILD_FAIL(thread, "msync() failed");
+
+	/* Verify the pattern */
+	for (i = 0; i < size; i++)
+		if (addr[i] != pattern)
+			CHILD_FAIL(thread, "Corruption at %p; "
+				   "Got %c, Expected %c",
+				   &addr[i], addr[i], pattern);
+	if (verbose)
+		tst_res(TINFO, "Thread %d (pid=%d): Pattern verified",
+		       thread, getpid());
+
+	/* Munmap the area */
+	SAFE_MUNMAP(addr, size);
+	SAFE_CLOSE(fd);
+	exit(0);
+}
+
+static void run_test(void)
+{
+	char *addr;
+	size_t size, itr;
+	int i, pid, status;
+	pid_t *wait_list;
+
+	wait_list = SAFE_MALLOC(threads * sizeof(pid_t));
+
+	/* Have to have enough available hugepages for each thread to
+	 * get its own copy, plus one for the parent/page-cache
+	 */
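+	/* For example, the default runtest entry (-T 2 -s 5) gives each
+	 * mapping 5 / 3 = 1 hugepage.
+	 */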
+	size = (nr_hugepages / (threads+1)) * hpage_size;
+	if (verbose)
+		tst_res(TINFO, "hpage_size: %ld, mapping size: %zu, threads: %d",
+			hpage_size, size, threads);
+
+	/* First, open the file */
+	fd = SAFE_OPEN(hfile, O_RDWR | O_CREAT, 0600);
+	SAFE_UNLINK(hfile);
+
+	/* Next, mmap the file with MAP_SHARED and fill it with data.
+	 * Without this, the kernel's fault handler would never be
+	 * exercised, because private mappings would be created for
+	 * the children at prefault time.
+	 */
+	if (mmap_file(fd, &addr, size, MAP_SHARED)) {
+		tst_res(TFAIL|TERRNO, "Failed to create shared mapping");
+		goto fail;
+	}
+
+	for (itr = 0; itr < size; itr += 8)
+		memcpy(addr+itr, "deadbeef", 8);
+
+	for (i = 0; i < threads; i++) {
+		pid = SAFE_FORK();
+
+		if (pid == 0)
+			do_work(i, size, fd);
+
+		wait_list[i] = pid;
+	}
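+
+	/* Reap the children; any abnormal exit or non-zero status fails the test */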
+	for (i = 0; i < threads; i++) {
+		SAFE_WAITPID(wait_list[i], &status, 0);
+		if (WIFSIGNALED(status)) {
+			tst_res(TFAIL, "Thread %d (pid=%d) received unhandled signal",
+				i, wait_list[i]);
+			goto fail;
+		}
+
+		if (WEXITSTATUS(status) != 0) {
+			tst_res(TFAIL, "Thread %d (pid=%d) failed", i, wait_list[i]);
+			goto fail;
+		}
+	}
+
+	SAFE_MUNMAP(addr, size);
+	SAFE_CLOSE(fd);
+	free(wait_list);
+
+	tst_res(TPASS, "No corruption in private hugepage mappings");
+	return;
+
+fail:
+	tst_brk(TBROK, "Aborting: no point in continuing after a failure");
+}
+
+static void setup(void)
+{
+	int ret;
+
+	if (!Hopt)
+		Hopt = tst_get_tmpdir();
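+	/* Mount hugetlbfs at Hopt so the test file is backed by hugepages */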
+	SAFE_MOUNT("none", Hopt, "hugetlbfs", 0, NULL);
+
+	snprintf(hfile, sizeof(hfile), "%s/ltp_hugetlbfile%d", Hopt, getpid());
+
+	hpage_size = SAFE_READ_MEMINFO("Hugepagesize:")*1024;
+
+	if (!threads_opt || !nr_opt)
+		tst_brk(TCONF, "Usage: -T <# threads> -s <# pages>");
+
+	ret = tst_parse_int(threads_opt, &threads, 1, INT_MAX);
+	if (ret)
+		tst_brk(TCONF, "Invalid thread argument");
+	ret = tst_parse_int(nr_opt, &nr_hugepages, 1, INT_MAX);
+	if (ret)
+		tst_brk(TCONF, "Invalid pages argument");
+
+	if (threads + 1 > nr_hugepages)
+		tst_brk(TCONF, "Need more hugepages than threads");
+}
+
+static void cleanup(void)
+{
+	if (fd >= 0)
+		SAFE_CLOSE(fd);
+	umount2(Hopt, MNT_DETACH);
+}
+
+static struct tst_test test = {
+	.needs_root = 1,
+	.needs_tmpdir = 1,
+	.options = (struct tst_option[]) {
+		{"v", &verbose, "Turns on verbose mode"},
+		{"T:", &threads_opt, "Number of threads"},
+		{"H:", &Hopt,   "Location of hugetlbfs, e.g. -H /var/hugetlbfs"},
+		{"s:", &nr_opt, "Number of hugepages to allocate"},
+		{}
+	},
+	.forks_child = 1,
+	.setup = setup,
+	.cleanup = cleanup,
+	.test_all = run_test,
+	.hugepages = {2, TST_REQUEST},
+};
-- 
2.31.1


