[LTP] [PATCH v2] kill01: New case cgroup kill
Li Wang
liwang@redhat.com
Mon Mar 6 11:16:26 CET 2023
On Sun, Mar 5, 2023 at 5:14 PM Wei Gao via ltp <ltp@lists.linux.it> wrote:
> Signed-off-by: Wei Gao <wegao@suse.com>
> ---
> lib/tst_cgroup.c | 1 +
> runtest/controllers | 1 +
> testcases/kernel/controllers/kill/.gitignore | 1 +
> testcases/kernel/controllers/kill/Makefile | 6 +
> testcases/kernel/controllers/kill/kill01.c | 130 +++++++++++++++++++
> 5 files changed, 139 insertions(+)
> create mode 100644 testcases/kernel/controllers/kill/.gitignore
> create mode 100644 testcases/kernel/controllers/kill/Makefile
> create mode 100644 testcases/kernel/controllers/kill/kill01.c
>
> diff --git a/lib/tst_cgroup.c b/lib/tst_cgroup.c
> index 50699bc63..77575431d 100644
> --- a/lib/tst_cgroup.c
> +++ b/lib/tst_cgroup.c
> @@ -166,6 +166,7 @@ static const struct cgroup_file cgroup_ctrl_files[] = {
> { "cgroup.controllers", NULL, 0 },
> { "cgroup.subtree_control", NULL, 0 },
> { "cgroup.clone_children", "cgroup.clone_children", 0 },
> + { "cgroup.kill", NULL, 0 },
> { }
> };
>
> diff --git a/runtest/controllers b/runtest/controllers
> index 8d1b936bf..2f69a8ec2 100644
> --- a/runtest/controllers
> +++ b/runtest/controllers
> @@ -23,6 +23,7 @@ memcontrol01 memcontrol01
> memcontrol02 memcontrol02
> memcontrol03 memcontrol03
> memcontrol04 memcontrol04
> +kill01 kill01
>
> cgroup_fj_function_debug cgroup_fj_function.sh debug
> cgroup_fj_function_cpuset cgroup_fj_function.sh cpuset
> diff --git a/testcases/kernel/controllers/kill/.gitignore b/testcases/kernel/controllers/kill/.gitignore
> new file mode 100644
> index 000000000..4f9649e27
> --- /dev/null
> +++ b/testcases/kernel/controllers/kill/.gitignore
> @@ -0,0 +1 @@
> +/kill01
> diff --git a/testcases/kernel/controllers/kill/Makefile b/testcases/kernel/controllers/kill/Makefile
> new file mode 100644
> index 000000000..5ea7d67db
> --- /dev/null
> +++ b/testcases/kernel/controllers/kill/Makefile
> @@ -0,0 +1,6 @@
> +# SPDX-License-Identifier: GPL-2.0-or-later
> +
> +top_srcdir ?= ../../../..
> +
> +include $(top_srcdir)/include/mk/testcases.mk
> +include $(top_srcdir)/include/mk/generic_leaf_target.mk
> diff --git a/testcases/kernel/controllers/kill/kill01.c b/testcases/kernel/controllers/kill/kill01.c
> new file mode 100644
> index 000000000..aafc7ba5f
> --- /dev/null
> +++ b/testcases/kernel/controllers/kill/kill01.c
> @@ -0,0 +1,130 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Copyright (c) 2012 Christian Brauner <brauner-AT-kernel.org>
> + * Copyright (c) 2023 SUSE LLC <wegao@suse.com>
> + */
> +
> +/*\
> + * [Description]
> + *
> + * Basic cgroup kill test.
> + *
> + */
> +
> +#include <errno.h>
> +#include <linux/limits.h>
> +#include <stdbool.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <sys/types.h>
> +#include <unistd.h>
> +#include <sys/wait.h>
> +
> +#include "lapi/syscalls.h"
> +#include "tst_test.h"
> +
> +#define pid_num 100
>
My concern with defining pid_num as a fixed constant is that
the test may spend a long time on a single-CPU or slower system.
A saner approach is probably to derive the number dynamically from
the CPUs available on the test bed (e.g. tst_ncpus_available() + 1).
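Something like the following rough sketch is what I have in mind
(untested, assuming tst_ncpus_available() from tst_cpu.h is fine to
use here and that pids[] gets allocated accordingly):

#include "tst_cpu.h"

static int pid_num;
static pid_t *pids;

static void setup(void)
{
	/* Scale the number of child processes with the usable CPUs */
	pid_num = tst_ncpus_available() + 1;
	pids = SAFE_MALLOC(pid_num * sizeof(*pids));
}

Then the buffer in cg_wait_for_proc_count() and the loops in run()
can simply be sized from pid_num as well.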
> +static struct tst_cg_group *cg_child_test_simple;
> +
> +
> +static int wait_for_pid(pid_t pid)
> +{
> + int status, ret;
> +
> +again:
> + ret = waitpid(pid, &status, 0);
> + if (ret == -1) {
> + if (errno == EINTR)
> + goto again;
> +
> + return -1;
> + }
> +
> + if (!WIFEXITED(status))
> + return -1;
> +
> + return WEXITSTATUS(status);
> +}
> +
> +/*
> + * A simple process running in a sleep loop until being
> + * re-parented.
> + */
> +static int child_fn(void)
> +{
> + int ppid = getppid();
> +
> + while (getppid() == ppid)
> + usleep(1000);
> +
> + return getppid() == ppid;
>
Why do we need to return the value of this comparison?
I suppose most of the time the child does _not_ even get
a chance to reach this point.
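In other words, the children are expected to be SIGKILLed while still
inside the loop, so the return value of that comparison is effectively
dead code. A sketch of what I mean (keeping the re-parent check only
as the loop condition):

static int child_fn(void)
{
	int ppid = getppid();

	/* Sleep until cgroup.kill terminates us (or we get re-parented) */
	while (getppid() == ppid)
		usleep(1000);

	return 0;
}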
> +}
> +
> +static int cg_run_nowait(const struct tst_cg_group *const cg,
> + int (*fn)(void))
> +{
> + int pid;
> +
> + pid = fork();
>
Using SAFE_FORK() might be better here.
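Something along these lines (SAFE_FORK() already reports the error
path via tst_brk(), so no extra handling is needed):

static int cg_run_nowait(const struct tst_cg_group *const cg,
			 int (*fn)(void))
{
	int pid;

	pid = SAFE_FORK();
	if (pid == 0) {
		/* Move the child into the cgroup, then run its payload */
		SAFE_CG_PRINTF(cg, "cgroup.procs", "%d", getpid());
		exit(fn());
	}

	return pid;
}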
> + if (pid == 0) {
> + SAFE_CG_PRINTF(cg, "cgroup.procs", "%d", getpid());
> + exit(fn());
> + }
> +
> + return pid;
> +}
> +
> +static int cg_wait_for_proc_count(const struct tst_cg_group *cg, int count)
> +{
> + char buf[20 * pid_num] = {0};
> + int attempts;
> + char *ptr;
> +
> + for (attempts = 10; attempts >= 0; attempts--) {
> + int nr = 0;
> +
> + SAFE_CG_READ(cg, "cgroup.procs", buf, sizeof(buf));
> +
> + for (ptr = buf; *ptr; ptr++)
> + if (*ptr == '\n')
> + nr++;
> +
> + if (nr >= count)
> + return 0;
> +
> + usleep(100000);
>
In this loop there is only about 1 second to wait for the children to
become ready. So what happens if the test runs on a slower/overloaded
machine that needs a bit longer than that? Shouldn't we handle this as
a corner-case failure?
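One option might be to poll for as long as the runtime budget allows
and make the timeout visible, roughly like this (a sketch only, assuming
tst_remaining_runtime() is acceptable here; max_runtime would need to be
raised accordingly):

static int cg_wait_for_proc_count(const struct tst_cg_group *cg, int count)
{
	char buf[20 * pid_num] = {0};
	int nr;
	char *ptr;

	/* Poll until the runtime budget is nearly exhausted, not just ~1s */
	while (tst_remaining_runtime() > 1) {
		nr = 0;

		SAFE_CG_READ(cg, "cgroup.procs", buf, sizeof(buf));

		for (ptr = buf; *ptr; ptr++)
			if (*ptr == '\n')
				nr++;

		if (nr >= count)
			return 0;

		usleep(100000);
	}

	tst_res(TINFO, "Timed out waiting for %d procs in the cgroup", count);
	return -1;
}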
> + }
> +
> + return -1;
> +}
> +
> +static void run(void)
> +{
> +
> + pid_t pids[100];
> + int i;
> +
> + cg_child_test_simple = tst_cg_group_mk(tst_cg, "cg_test_simple");
> +
> + for (i = 0; i < pid_num; i++)
> + pids[i] = cg_run_nowait(cg_child_test_simple, child_fn);
> +
> + TST_EXP_PASS(cg_wait_for_proc_count(cg_child_test_simple, pid_num));
> + SAFE_CG_PRINTF(cg_child_test_simple, "cgroup.kill", "%d", 1);
> +
> + for (i = 0; i < pid_num; i++) {
> + /* wait_for_pid(pids[i]); */
> + TST_EXP_PASS_SILENT(wait_for_pid(pids[i]) == SIGKILL);
> + }
> +
> + cg_child_test_simple = tst_cg_group_rm(cg_child_test_simple);
> +}
> +
> +static struct tst_test test = {
> + .test_all = run,
> + .forks_child = 1,
> + .max_runtime = 5,
> + .needs_cgroup_ctrls = (const char *const []){ "memory", NULL },
> + .needs_cgroup_ver = TST_CG_V2,
> +};
> --
> 2.35.3
>
--
Regards,
Li Wang