[LTP] [PATCH 05/13] Hugetlb: Migrating libhugetlbfs mremap-fixed-huge-near-normal

Tarun Sahu tsahu@linux.ibm.com
Sun Dec 25 16:42:05 CET 2022


Migrating the libhugetlbfs/testcases/mremap-fixed-huge-near-normal.c test

Test Description: The kernel has a bug with mremap() on some architectures.
mremap() can cause crashes on architectures with holes in the address
space (like ia64) and on powerpc with its distinct page size "slices".

This test gets the address of a normal mapping and mremap()s a hugepage
mapping near that normal mapping.
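
Below is a minimal standalone sketch (not the LTP test itself) of the
pattern being exercised: a huge page mapping is mremap()ed onto a fixed,
hugepage-aligned address directly adjacent to a live normal (small page)
mapping. The 2MB huge page size and the hugetlbfs path "/mnt/huge/file"
are assumptions for illustration only; adjust both, and make sure a free
huge page is available, on the system under test.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)	/* assumed default huge page size */

int main(void)
{
	int fd = open("/mnt/huge/file", O_RDWR | O_CREAT, 0600);
	char *area, *target, *huge, *moved;

	if (fd < 0) {
		perror("open hugetlbfs file");
		return 1;
	}

	/* Reserve a span of normal anonymous memory large enough to contain
	 * a hugepage-aligned slot with small-page mappings around it. */
	area = mmap(NULL, 3 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap normal area");
		return 1;
	}
	memset(area, 0x5a, 3 * HPAGE_SIZE);	/* actually populate the pages */

	/* First hugepage-aligned address inside the normal area. */
	target = (char *)(((uintptr_t)area + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));

	/* Map one huge page elsewhere in the address space. */
	huge = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (huge == MAP_FAILED) {
		perror("mmap huge page");
		return 1;
	}

	/* Ask the kernel to move the huge mapping onto the aligned slot, so
	 * it ends up directly adjacent to small-page mappings.  The kernel
	 * may legitimately refuse (e.g. EINVAL on powerpc slices); what it
	 * must not do is crash or corrupt the surrounding mappings. */
	moved = mremap(huge, HPAGE_SIZE, HPAGE_SIZE,
		       MREMAP_MAYMOVE | MREMAP_FIXED, target);
	if (moved == MAP_FAILED)
		perror("mremap near normal mapping (refusal is acceptable)");
	else
		printf("huge page now mapped at %p, next to normal pages\n", moved);

	close(fd);
	return 0;
}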

Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com>
---
 runtest/hugetlb                               |   1 +
 testcases/kernel/mem/.gitignore               |   1 +
 .../kernel/mem/hugetlb/hugemmap/hugemmap25.c  | 146 ++++++++++++++++++
 3 files changed, 148 insertions(+)
 create mode 100644 testcases/kernel/mem/hugetlb/hugemmap/hugemmap25.c

diff --git a/runtest/hugetlb b/runtest/hugetlb
index 8ade3c9ec..65265b0fe 100644
--- a/runtest/hugetlb
+++ b/runtest/hugetlb
@@ -25,6 +25,7 @@ hugemmap20 hugemmap20
 hugemmap21 hugemmap21
 hugemmap22 hugemmap22
 hugemmap23 hugemmap23
+hugemmap25 hugemmap25
 hugemmap05_1 hugemmap05 -m
 hugemmap05_2 hugemmap05 -s
 hugemmap05_3 hugemmap05 -s -m
diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
index ffd831f2e..c865a1e55 100644
--- a/testcases/kernel/mem/.gitignore
+++ b/testcases/kernel/mem/.gitignore
@@ -24,6 +24,7 @@
 /hugetlb/hugemmap/hugemmap21
 /hugetlb/hugemmap/hugemmap22
 /hugetlb/hugemmap/hugemmap23
+/hugetlb/hugemmap/hugemmap25
 /hugetlb/hugeshmat/hugeshmat01
 /hugetlb/hugeshmat/hugeshmat02
 /hugetlb/hugeshmat/hugeshmat03
diff --git a/testcases/kernel/mem/hugetlb/hugemmap/hugemmap25.c b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap25.c
new file mode 100644
index 000000000..f8e99faf6
--- /dev/null
+++ b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap25.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * Copyright (C) 2009 IBM Corporation.
+ * Author: David Gibson
+ */
+
+/*\
+ * [Description]
+ *
+ * The kernel has a bug with mremap() on some architectures. mremap() can
+ * cause crashes on architectures with holes in the address space
+ * (like ia64) and on powerpc with its distinct page size "slices".
+ *
+ * This test gets the address of a normal mapping and mremap()s a hugepage
+ * mapping near that normal mapping.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <sys/mount.h>
+#include <limits.h>
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include "hugetlb.h"
+
+#define RANDOM_CONSTANT	0x1234ABCD
+#define MNTPOINT "hugetlbfs/"
+
+static int  fd = -1;
+static long hpage_size;
+
+static int do_readback(void *p, size_t size, const char *stage)
+{
+	unsigned int *q = p;
+	size_t i;
+
+	tst_res(TINFO, "%s(%p, 0x%lx, \"%s\")", __func__, p,
+	       (unsigned long)size, stage);
+
+	for (i = 0; i < (size / sizeof(*q)); i++)
+		q[i] = RANDOM_CONSTANT ^ i;
+
+	for (i = 0; i < (size / sizeof(*q)); i++) {
+		if (q[i] != (RANDOM_CONSTANT ^ i)) {
+			tst_res(TFAIL, "Stage \"%s\": Mismatch at offset 0x%lx: 0x%x "
+					"instead of 0x%lx", stage, i, q[i], RANDOM_CONSTANT ^ i);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int do_remap(int fd, void *target)
+{
+	void *a, *b;
+	int ret;
+
+	a = SAFE_MMAP(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+
+	ret = do_readback(a, hpage_size, "base huge");
+	if (ret)
+		goto cleanup;
+	b = mremap(a, hpage_size, hpage_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+		   target);
+
+	if (b != MAP_FAILED) {
+		ret = do_readback(b, hpage_size, "remapped");
+		a = b;
+	} else
+		tst_res(TINFO|TERRNO, "mremap(MAYMOVE|FIXED) disallowed");
+cleanup:
+	SAFE_MUNMAP(a, hpage_size);
+	return ret;
+}
+
+static void *map_align(size_t size, size_t align)
+{
+	unsigned long xsize = size + align - getpagesize();
+	size_t t;
+	void *p, *q;
+
+	p = SAFE_MMAP(NULL, xsize, PROT_READ|PROT_WRITE,
+		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+	q = PALIGN(p, align);
+
+	t = q - p;
+	if (t)
+		SAFE_MUNMAP(p, t);
+
+	t = p + xsize - (q + size);
+	if (t)
+		SAFE_MUNMAP(q + size, t);
+
+	return q;
+}
+
+static void run_test(void)
+{
+	void *p;
+	int ret;
+
+	fd = tst_creat_unlinked(MNTPOINT, 0);
+	p = map_align(3*hpage_size, hpage_size);
+
+	SAFE_MUNMAP(p, hpage_size);
+	SAFE_MUNMAP(p + 2*hpage_size, hpage_size);
+
+	p = p + hpage_size;
+
+	tst_res(TINFO, "Normal mapping at %p", p);
+	ret = do_readback(p, hpage_size, "base normal page");
+	if (ret)
+		goto cleanup;
+	ret = do_remap(fd, p - hpage_size);
+	if (ret)
+		goto cleanup;
+	ret = do_remap(fd, p + hpage_size);
+	if (ret == 0)
+		tst_res(TPASS, "Successfully tested mremap hpage near normal mapping");
+cleanup:
+	SAFE_CLOSE(fd);
+}
+
+static void setup(void)
+{
+	hpage_size = SAFE_READ_MEMINFO(MEMINFO_HPAGE_SIZE)*1024;
+}
+
+static void cleanup(void)
+{
+	if (fd >= 0)
+		SAFE_CLOSE(fd);
+}
+
+static struct tst_test test = {
+	.needs_root = 1,
+	.mntpoint = MNTPOINT,
+	.needs_hugetlbfs = 1,
+	.needs_tmpdir = 1,
+	.setup = setup,
+	.cleanup = cleanup,
+	.test_all = run_test,
+	.hugepages = {3, TST_NEEDS},
+};
-- 
2.31.1


