[LTP] [PATCH v3] syscalls/nice05: Add testcase for nice() syscall

Zhao Gongyi zhaogongyi@huawei.com
Sat Jul 2 04:44:28 CEST 2022


Add a test that verifies the low nice thread gets more execution
time than the high nice thread when both threads are bound to the
same CPU.

Signed-off-by: Zhao Gongyi <zhaogongyi@huawei.com>
---
 runtest/syscalls                          |   1 +
 testcases/kernel/syscalls/nice/.gitignore |   1 +
 testcases/kernel/syscalls/nice/Makefile   |   2 +
 testcases/kernel/syscalls/nice/nice05.c   | 202 ++++++++++++++++++++++
 4 files changed, 206 insertions(+)
 create mode 100644 testcases/kernel/syscalls/nice/nice05.c

diff --git a/runtest/syscalls b/runtest/syscalls
index 36fc50aeb..784f3184d 100644
--- a/runtest/syscalls
+++ b/runtest/syscalls
@@ -901,6 +901,7 @@ nice01 nice01
 nice02 nice02
 nice03 nice03
 nice04 nice04
+nice05 nice05

 open01 open01
 open01A symlink01 -T open01
diff --git a/testcases/kernel/syscalls/nice/.gitignore b/testcases/kernel/syscalls/nice/.gitignore
index 9d7a1bb43..58d64779e 100644
--- a/testcases/kernel/syscalls/nice/.gitignore
+++ b/testcases/kernel/syscalls/nice/.gitignore
@@ -2,3 +2,4 @@
 /nice02
 /nice03
 /nice04
+/nice05
diff --git a/testcases/kernel/syscalls/nice/Makefile b/testcases/kernel/syscalls/nice/Makefile
index 044619fb8..02e78a295 100644
--- a/testcases/kernel/syscalls/nice/Makefile
+++ b/testcases/kernel/syscalls/nice/Makefile
@@ -3,6 +3,8 @@

 top_srcdir		?= ../../../..

+nice05: CFLAGS += -pthread
+
 include $(top_srcdir)/include/mk/testcases.mk

 include $(top_srcdir)/include/mk/generic_leaf_target.mk
diff --git a/testcases/kernel/syscalls/nice/nice05.c b/testcases/kernel/syscalls/nice/nice05.c
new file mode 100644
index 000000000..c0a528974
--- /dev/null
+++ b/testcases/kernel/syscalls/nice/nice05.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright(c) 2022 Huawei Technologies Co., Ltd
+ * Author: Li Mengfei <limengfei4@huawei.com>
+ *         Zhao Gongyi <zhaogongyi@huawei.com>
+ */
+
+/*\
+ * [Description]
+ *
+ * 1. Create a high nice thread and a low nice thread, then have the
+ *    main thread wake them both at the same time
+ * 2. Both threads run on the same CPU
+ * 3. Verify that the low nice thread accumulates more runtime than
+ *    the high nice thread
+ */
+
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include "tst_test.h"
+#include "tst_safe_pthread.h"
+#include "lapi/syscalls.h"
+
+static pthread_barrier_t barrier;
+static int some_cpu;
+static cpu_set_t *set;
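+/* TIDs of the two worker threads, in memory shared with the forked child */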
+static pid_t *child_tid;
+
+static void set_nice(int nice_inc)
+{
+	int orig_nice;
+
+	orig_nice = SAFE_GETPRIORITY(PRIO_PROCESS, 0);
+
+	TEST(nice(nice_inc));
+
+	/* nice() may legitimately return -1, so check errno as well */
+	if (TST_ERR)
+		tst_brk(TBROK | TTERRNO, "nice(%d) failed", nice_inc);
+
+	if (TST_RET != (orig_nice + nice_inc)) {
+		tst_brk(TBROK, "nice(%d) returned %li, expected %i",
+			nice_inc, TST_RET, orig_nice + nice_inc);
+	}
+}
+
+static void *nice_low_thread(void *arg)
+{
+	volatile int number = 0;
+
+	child_tid[0] = tst_syscall(__NR_gettid);
+	set_nice((intptr_t)arg);
+	TEST(pthread_barrier_wait(&barrier));
+	if (TST_RET != 0 && TST_RET != PTHREAD_BARRIER_SERIAL_THREAD)
+		tst_brk(TBROK | TRERRNO, "pthread_barrier_wait() failed");
+
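+	/* Spin to burn CPU time that the scheduler will account */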
+	while (1)
+		number++;
+
+	return NULL;
+}
+
+static void *nice_high_thread(void *arg)
+{
+	volatile int number = 0;
+
+	child_tid[1] = tst_syscall(__NR_gettid);
+	set_nice((intptr_t)arg);
+	TEST(pthread_barrier_wait(&barrier));
+	if (TST_RET != 0 && TST_RET != PTHREAD_BARRIER_SERIAL_THREAD)
+		tst_brk(TBROK | TRERRNO, "pthread_barrier_wait() failed");
+
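+	/* Spin to burn CPU time that the scheduler will account */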
+	while (1)
+		number++;
+
+	return NULL;
+}
+
+static void setup(void)
+{
+	size_t size;
+	size_t i;
+	int nrcpus = 1024;
+
+	set = CPU_ALLOC(nrcpus);
+	if (!set)
+		tst_brk(TBROK | TERRNO, "CPU_ALLOC()");
+
+	size = CPU_ALLOC_SIZE(nrcpus);
+	CPU_ZERO_S(size, set);
+	if (sched_getaffinity(0, size, set) < 0)
+		tst_brk(TBROK | TERRNO, "sched_getaffinity()");
+
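+	/* Remember the highest-numbered CPU in our affinity mask */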
+	for (i = 0; i < size * 8; i++)
+		if (CPU_ISSET_S(i, size, set))
+			some_cpu = i;
+
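+	/* Pin the whole process to that single CPU */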
+	CPU_ZERO_S(size, set);
+	CPU_SET_S(some_cpu, size, set);
+	if (sched_setaffinity(0, size, set) < 0)
+		tst_brk(TBROK | TERRNO, "sched_setaffinity()");
+
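+	/*
+	 * Map the TID array shared so that the parent can read the
+	 * values written by the worker threads in the forked child
+	 */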
+	child_tid = SAFE_MMAP(0, sizeof(pid_t) * 2,
+			PROT_WRITE | PROT_READ,
+			MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+}
+
+static void cleanup(void)
+{
+	if (set)
+		CPU_FREE(set);
+
+	if (child_tid)
+		SAFE_MUNMAP(child_tid, sizeof(pid_t) * 2);
+}
+
+static void verify_nice(void)
+{
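+	/* "low"/"high" refer to the nice value: lower nice, higher priority */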
+	intptr_t nice_inc_high = -1;
+	intptr_t nice_inc_low = -2;
+	char buf[4096] = {0};
+	float sum_exec_runtime[2];
+	pthread_t nice_low, nice_high;
+	pid_t pid;
+
+	pid = SAFE_FORK();
+	if (!pid) {
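+		/* Count of 3: the two worker threads plus this thread */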
+		TEST(pthread_barrier_init(&barrier, NULL, 3));
+		if (TST_RET != 0) {
+			tst_brk(TBROK | TRERRNO,
+				"pthread_barrier_init() failed");
+		}
+
+		SAFE_PTHREAD_CREATE(&nice_high, NULL, nice_high_thread,
+			(void *)nice_inc_high);
+		SAFE_PTHREAD_CREATE(&nice_low, NULL, nice_low_thread,
+			(void *)nice_inc_low);
+
+		TEST(pthread_barrier_wait(&barrier));
+		if (TST_RET != 0 && TST_RET != PTHREAD_BARRIER_SERIAL_THREAD) {
+			tst_brk(TBROK | TRERRNO,
+				"pthread_barrier_wait() failed");
+		}
+
+		while (1)
+			pause();
+	}
+
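+	/*
+	 * Let the two threads compete for the CPU for the remaining
+	 * runtime, then stop the child so that the scheduler statistics
+	 * in /proc/<tid>/sched no longer change while we read them
+	 */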
+	sleep(tst_remaining_runtime());
+	SAFE_KILL(pid, SIGSTOP);
+
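+	/* Skip to the 3rd float in /proc/<tid>/sched (se.sum_exec_runtime) */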
+	sprintf(buf, "/proc/%d/sched", child_tid[0]);
+	SAFE_FILE_SCANF(buf, "%*s %*s %*s %*s %*s"
+			"%*s %*s %*f %*s %*s %*f %*s %*s %f",
+			&sum_exec_runtime[0]);
+
+	sprintf(buf, "/proc/%d/sched", child_tid[1]);
+	SAFE_FILE_SCANF(buf, "%*s %*s %*s %*s %*s"
+			"%*s %*s %*f %*s %*s %*f %*s %*s %f",
+			&sum_exec_runtime[1]);
+
+	if (sum_exec_runtime[0] < sum_exec_runtime[1]) {
+		tst_res(TFAIL, "nice_low_thread sum_exec_runtime: %f "
+			"nice_high_thread sum_exec_runtime: %f",
+			sum_exec_runtime[0], sum_exec_runtime[1]);
+	} else {
+		tst_res(TPASS, "low nice thread accumulated more runtime "
+			"than the high nice thread");
+	}
+
+	SAFE_KILL(pid, SIGKILL);
+	SAFE_WAIT(NULL);
+}
+
+static struct tst_test test = {
+	.setup = setup,
+	.cleanup = cleanup,
+	.test_all = verify_nice,
+	.needs_root = 1,
+	.forks_child = 1,
+	.max_runtime = 3,
+};
--
2.17.1


