[LTP] [PATCH] Fix hugeshmat05 test failure with 1GB hugepages.
Pavithra
pavrampu@linux.ibm.com
Mon Apr 6 17:50:04 CEST 2026
Modify the test to detect large hugepage sizes (>= 1GB) and adjust the
test sizes accordingly.
Signed-off-by: Pavithra <pavrampu@linux.ibm.com>
---
.../mem/hugetlb/hugeshmat/hugeshmat05.c | 45 +++++++++++++++----
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
index 3b2ae351c..870a61ec1 100644
--- a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat05.c
@@ -37,9 +37,13 @@ void setup(void)
{
page_size = getpagesize();
hpage_size = SAFE_READ_MEMINFO("Hugepagesize:") * 1024;
+
+ tst_res(TINFO, "Page size: %ld bytes", page_size);
+ tst_res(TINFO, "Hugepage size: %ld bytes (%ld MB)",
+ hpage_size, hpage_size / (1024 * 1024));
}
-void shm_test(int size)
+void shm_test(long size)
{
int shmid;
char *shmaddr;
@@ -56,7 +60,7 @@ void shm_test(int size)
}
shmaddr[0] = 1;
- tst_res(TINFO, "allocated %d huge bytes", size);
+ tst_res(TINFO, "allocated %ld huge bytes", size);
if (shmdt((const void *)shmaddr) != 0) {
shmctl(shmid, IPC_RMID, NULL);
@@ -69,16 +73,39 @@ void shm_test(int size)
static void test_hugeshmat(void)
{
unsigned int i;
+ long tst_sizes[4];
+
+ /*
+ * For large hugepage sizes (e.g., 1GB), we need to ensure
+ * test sizes are within reasonable bounds and properly aligned.
+ * The original test used N*hpage_size which could be 4GB for 1GB pages.
+ *
+ * We adjust the test to use N/2 multiplier for large hugepages
+ * to avoid excessive memory requirements while still testing the
+ * alignment boundary conditions with multiple pages.
+ */
+ if (hpage_size >= 1024 * 1024 * 1024) {
+ /* For 1GB or larger hugepages, use N/2 pages (2 pages for N=4) */
+ long multiplier = N / 2;
- const int tst_sizes[] = {
- N * hpage_size - page_size,
- N * hpage_size - page_size - 1,
- hpage_size,
- hpage_size + 1
- };
+ tst_sizes[0] = multiplier * hpage_size - page_size;
+ tst_sizes[1] = multiplier * hpage_size - page_size - 1;
+ tst_sizes[2] = hpage_size;
+ tst_sizes[3] = hpage_size + 1;
+ tst_res(TINFO, "Using N/2=%ld hugepage test sizes for large hugepages", multiplier);
+ } else {
+ /* For smaller hugepages (2MB, 16MB, etc.), use original test */
+ tst_sizes[0] = N * hpage_size - page_size;
+ tst_sizes[1] = N * hpage_size - page_size - 1;
+ tst_sizes[2] = hpage_size;
+ tst_sizes[3] = hpage_size + 1;
+ tst_res(TINFO, "Using N=%d hugepage test sizes", N);
+ }
- for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i)
+ for (i = 0; i < ARRAY_SIZE(tst_sizes); ++i) {
+ tst_res(TINFO, "Testing size: %ld bytes", tst_sizes[i]);
shm_test(tst_sizes[i]);
+ }
tst_res(TPASS, "No regression found.");
}
--
2.53.0
More information about the ltp
mailing list