[LTP] [PATCH v9 1/2] kill01: New case cgroup kill

Shivani Samala samalashivani123@gmail.com
Thu Apr 27 14:13:03 CEST 2023


Please remove me from your mailing list. I’m not from your team. Why are
you sending all this to me? Just remove me from your list.

On Wed, 26 Apr 2023 at 6:40 PM, Cyril Hrubis <chrubis@suse.cz> wrote:

> Hi!
> > ---
> >  lib/tst_cgroup.c                              |   1 +
> >  runtest/controllers                           |   1 +
> >  .../kernel/controllers/cgroup/.gitignore      |   1 +
> >  .../kernel/controllers/cgroup/cgroup_core03.c | 144 ++++++++++++++++++
> >  4 files changed, 147 insertions(+)
> >  create mode 100644 testcases/kernel/controllers/cgroup/cgroup_core03.c
> >
> > diff --git a/lib/tst_cgroup.c b/lib/tst_cgroup.c
> > index 50699bc63..77575431d 100644
> > --- a/lib/tst_cgroup.c
> > +++ b/lib/tst_cgroup.c
> > @@ -166,6 +166,7 @@ static const struct cgroup_file cgroup_ctrl_files[] = {
> >       { "cgroup.controllers", NULL, 0 },
> >       { "cgroup.subtree_control", NULL, 0 },
> >       { "cgroup.clone_children", "cgroup.clone_children", 0 },
> > +     { "cgroup.kill", NULL, 0 },
> >       { }
> >  };
> >
> > diff --git a/runtest/controllers b/runtest/controllers
> > index 8d1b936bf..93c52c439 100644
> > --- a/runtest/controllers
> > +++ b/runtest/controllers
> > @@ -1,6 +1,7 @@
> >  #DESCRIPTION:Resource Management testing
> >  cgroup_core01        cgroup_core01
> >  cgroup_core02        cgroup_core02
> > +cgroup_core03        cgroup_core03
> >  cgroup               cgroup_regression_test.sh
> >  memcg_regression     memcg_regression_test.sh
> >  memcg_test_3 memcg_test_3
> > diff --git a/testcases/kernel/controllers/cgroup/.gitignore b/testcases/kernel/controllers/cgroup/.gitignore
> > index 8deae77da..9f1d1ada9 100644
> > --- a/testcases/kernel/controllers/cgroup/.gitignore
> > +++ b/testcases/kernel/controllers/cgroup/.gitignore
> > @@ -2,3 +2,4 @@
> >  /cgroup_regression_getdelays
> >  /cgroup_core01
> >  /cgroup_core02
> > +/cgroup_core03
> > diff --git a/testcases/kernel/controllers/cgroup/cgroup_core03.c b/testcases/kernel/controllers/cgroup/cgroup_core03.c
> > new file mode 100644
> > index 000000000..2a6941c44
> > --- /dev/null
> > +++ b/testcases/kernel/controllers/cgroup/cgroup_core03.c
> > @@ -0,0 +1,144 @@
> > +// SPDX-License-Identifier: GPL-2.0-or-later
> > +/*
> > + * Copyright (c) 2012 Christian Brauner <brauner-AT-kernel.org>
> > + * Copyright (c) 2023 SUSE LLC <wegao@suse.com>
> > + */
> > +
> > +/*\
> > + * [Description]
> > + *
> > + * This test is copied from kselftest
> > + * tools/testing/selftests/cgroup/test_kill.c
> > + * Only the simple test is implemented in the current case; the other
> > + * cases, such as test_cgkill_tree and test_cgkill_forkbomb, can be
> > + * created later.
> > + *
> > + */
> > +
> > +#include <sys/wait.h>
> > +
> > +#include "lapi/syscalls.h"
> > +#include "tst_test.h"
> > +
> > +#define MAX_PID_NUM 100
> > +#define PID_NUM MIN(MAX_PID_NUM, (tst_ncpus_available() + 1))
> > +#define BUF_LEN (20 * PID_NUM)
> > +
> > +static int *data_ptr;
> > +static char *buf;
> > +static struct tst_cg_group *cg_child_test_simple;
> > +
> > +static int wait_for_pid(pid_t pid)
> > +{
> > +     int status, ret;
> > +
> > +again:
> > +     ret = waitpid(pid, &status, 0);
> > +     if (ret == -1) {
> > +             if (errno == EINTR)
> > +                     goto again;
> > +
> > +             return -1;
> > +     }
> > +
> > +     if (WIFSIGNALED(status))
> > +             return 0;
> > +
> > +     return -1;
> > +}
> > +
> > +/*
> > + * A simple process running in a sleep loop until being
> > + * re-parented.
> > + */
> > +static void child_fn(void)
> > +{
> > +     int ppid = getppid();
> > +
> > +     while (getppid() == ppid)
> > +             usleep(1000);
> > +
>
> What's the reason we are waiting for reparenting here? Aren't these
> processes killed anyway? I suppose that we can just call pause() here
> instead.
>
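> Something along these lines should be enough (untested sketch; assuming
> pause() from <unistd.h> is already available via the existing includes),
> since the children never survive the cgroup.kill anyway:
>
>         static void child_fn(void)
>         {
>                 /* sleep until killed via cgroup.kill */
>                 pause();
>         }
>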
> > +}
> > +
> > +static int cg_run_nowait(const struct tst_cg_group *const cg,
> > +               void (*fn)(void))
> > +{
> > +     int pid;
> > +
> > +     pid = SAFE_FORK();
> > +     if (pid == 0) {
> > +             SAFE_CG_PRINTF(cg, "cgroup.procs", "%d", getpid());
> > +             tst_atomic_inc(data_ptr);
> > +             if (tst_atomic_load(data_ptr) == PID_NUM)
> > +                     TST_CHECKPOINT_WAKE(0);
>
> I do not think that this is 100% correct: as it is, we may end up calling
> TST_CHECKPOINT_WAKE(0) more than once here, since there is a race
> between the increment and the load. And if that happens, only one of the
> calls to TST_CHECKPOINT_WAKE(0) will succeed, while the rest will fail and
> break the test.
>
> First of all, tst_atomic_inc() returns the value after the increment,
> so we can actually get the counter value without a race.
>
> Secondly, I think that we can get rid of the atomic operations
> completely; we just have to loop N times over TST_CHECKPOINT_WAIT()
> in the run() function and call TST_CHECKPOINT_WAKE() in each child
> instance. We could also add a TST_CHECKPOINT_WAIT_NR() to the test library
> that takes the number of wait calls as a parameter.
>
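> Roughly like this (untested sketch, reusing the names from the patch):
>
>         /* child side, in cg_run_nowait() */
>         pid = SAFE_FORK();
>         if (pid == 0) {
>                 SAFE_CG_PRINTF(cg, "cgroup.procs", "%d", getpid());
>                 /* each child wakes the parent exactly once */
>                 TST_CHECKPOINT_WAKE(0);
>                 fn();
>         }
>
>         /* parent side, in run(), instead of the single wait */
>         for (i = 0; i < PID_NUM; i++)
>                 TST_CHECKPOINT_WAIT(0);
>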
> > +             fn();
> > +     }
> > +
> > +     return pid;
> > +}
> > +
> > +static int cg_wait_for_proc_count(const struct tst_cg_group *cg, int count)
>
> The function does not wait for anything, so it should be called
> cg_count_procs() or similar.
>
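> I.e. something like this, also folding in the return-the-count idea
> suggested further down (untested sketch):
>
>         static int cg_count_procs(const struct tst_cg_group *cg)
>         {
>                 char *ptr;
>                 int nr = 0;
>
>                 memset(buf, 0, BUF_LEN);
>                 SAFE_CG_READ(cg, "cgroup.procs", buf, BUF_LEN);
>
>                 /* one pid per newline-terminated line */
>                 for (ptr = buf; *ptr; ptr++)
>                         if (*ptr == '\n')
>                                 nr++;
>
>                 return nr;
>         }
>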
> > +{
> > +     char *ptr;
> > +
> > +     int nr = 0;
> > +
> > +     SAFE_CG_READ(cg, "cgroup.procs", buf, BUF_LEN);
> > +
> > +     for (ptr = buf; *ptr; ptr++)
> > +             if (*ptr == '\n')
> > +                     nr++;
> > +
> > +     if (nr >= count)
> > +             return 0;
> > +
> > +     tst_res(TINFO, "Expected %d processes but found %d", count, nr);
> > +
> > +     return -1;
> > +}
> > +
> > +static void run(void)
> > +{
> > +     pid_t pids[MAX_PID_NUM];
> > +     int i;
> > +     *data_ptr = 0;
> > +
> > +     cg_child_test_simple = tst_cg_group_mk(tst_cg, "cg_test_simple");
> > +
> > +     memset(buf, 0, BUF_LEN);
> > +
> > +     for (i = 0; i < PID_NUM; i++)
> > +             pids[i] = cg_run_nowait(cg_child_test_simple, child_fn);
> > +
> > +     TST_CHECKPOINT_WAIT(0);
> > +
> > +     TST_EXP_PASS(cg_wait_for_proc_count(cg_child_test_simple, PID_NUM));
>
> This is very minor, but I would make the function return the number of
> pids found in the cgroup and call it as:
>
>         TST_EXP_VAL(cg_count_procs(cg_child_test_simple), pid_num);
>
> > +     SAFE_CG_PRINTF(cg_child_test_simple, "cgroup.kill", "%d", 1);
> > +
> > +     for (i = 0; i < PID_NUM; i++)
> > +             TST_EXP_PASS_SILENT(wait_for_pid(pids[i]));
>
> Then here we can do:
>
>         TST_EXP_VAL(cg_count_procs(cg_child_test_simple), 0);
>
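> So the end of run() would then read roughly like this (sketch, assuming
> the cg_count_procs() variant from above):
>
>         TST_EXP_VAL(cg_count_procs(cg_child_test_simple), PID_NUM);
>
>         SAFE_CG_PRINTF(cg_child_test_simple, "cgroup.kill", "%d", 1);
>
>         for (i = 0; i < PID_NUM; i++)
>                 TST_EXP_PASS_SILENT(wait_for_pid(pids[i]));
>
>         TST_EXP_VAL(cg_count_procs(cg_child_test_simple), 0);
>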
> > +     cg_child_test_simple = tst_cg_group_rm(cg_child_test_simple);
> > +}
> > +
> > +static void setup(void)
> > +{
> > +     buf = tst_alloc(BUF_LEN);
> > +     data_ptr = SAFE_MMAP(NULL, sizeof(uintptr_t), PROT_READ | PROT_WRITE,
> > +                          MAP_SHARED | MAP_ANONYMOUS, -1, 0);
> > +}
> > +
> > +static void cleanup(void)
> > +{
> > +     if (data_ptr)
> > +             SAFE_MUNMAP(data_ptr, sizeof(uintptr_t));
> > +}
> > +
> > +static struct tst_test test = {
> > +     .test_all = run,
> > +     .setup = setup,
> > +     .cleanup = cleanup,
> > +     .forks_child = 1,
> > +     .max_runtime = 20,
> > +     .needs_cgroup_ctrls = (const char *const []){ "pseudo", NULL },
> > +     .needs_cgroup_ver = TST_CG_V2,
> > +     .needs_checkpoints = 1,
> > +};
> > --
> > 2.35.3
> >
> >
> > --
> > Mailing list info: https://lists.linux.it/listinfo/ltp
>
> --
> Cyril Hrubis
> chrubis@suse.cz
>
> --
> Mailing list info: https://lists.linux.it/listinfo/ltp
>

