[LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
Petr Vorel
pvorel@suse.cz
Wed Apr 6 07:39:53 CEST 2022
Hi Richard,
could you please have a look at this one?
A bug in it could significantly affect CVE reproducibility.
Kind regards,
Petr
> Hi, I'm working to get fzsync working on Android, which does not have
> pthread_cancel available.
> In the absence of pthread_cancel, when thread A exits due to a break,
> thread B will get stuck in an infinite loop while waiting for thread A
> to progress.
> Instead of cancelling thread B, we can use the exit flag to break out of
> thread B's loop. This should also remove the need for the wrapper
> around the thread.
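[For context, the approach boils down to polling a shared exit flag inside the
spin-wait loops, so neither thread keeps spinning once the other has left its
run loop. Below is a minimal, self-contained sketch of that pattern using C11
atomics and hypothetical names; it is only an illustration, not the actual
tst_fuzzy_sync.h API.]

#include <stdatomic.h>
#include <sched.h>

/* Hypothetical state shared between the two racing threads. */
struct pair_state {
	atomic_int other_cntr; /* progress counter of the other thread */
	atomic_int exit;       /* raised when the pair is being torn down */
};

/*
 * Spin until the other thread reaches 'target', but also bail out as soon
 * as the exit flag is set. Without the exit check, a thread whose partner
 * broke out of the run loop early would spin here forever when
 * pthread_cancel is unavailable.
 */
static void wait_for_other(struct pair_state *p, int target)
{
	while (atomic_load(&p->other_cntr) < target
	       && !atomic_load(&p->exit))
		sched_yield();
}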
> Signed-off-by: Edward Liaw <edliaw@google.com>
> ---
> include/tst_fuzzy_sync.h | 68 +++++++++++------------------
> lib/newlib_tests/tst_fuzzy_sync01.c | 7 +--
> lib/newlib_tests/tst_fuzzy_sync02.c | 7 +--
> 3 files changed, 27 insertions(+), 55 deletions(-)
> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index bc3450294..2c120f077 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -60,7 +60,6 @@
> */
> #include <math.h>
> -#include <pthread.h>
> #include <stdbool.h>
> #include <stdlib.h>
> #include <sys/time.h>
> @@ -233,36 +232,12 @@ static inline void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
> static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
> {
> if (pair->thread_b) {
> - /* Revoke thread B if parent hits accidental break */
> - if (!pair->exit) {
> - tst_atomic_store(1, &pair->exit);
> - usleep(100000);
> - pthread_cancel(pair->thread_b);
> - }
> + tst_atomic_store(1, &pair->exit);
> SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
> pair->thread_b = 0;
> }
> }
> -/** To store the run_b pointer and pass to tst_fzsync_thread_wrapper */
> -struct tst_fzsync_run_thread {
> - void *(*func)(void *);
> - void *arg;
> -};
> -
> -/**
> - * Wrap run_b for tst_fzsync_pair_reset to enable pthread cancel
> - * at the start of the thread B.
> - */
> -static inline void *tst_fzsync_thread_wrapper(void *run_thread)
> -{
> - struct tst_fzsync_run_thread t = *(struct tst_fzsync_run_thread *)run_thread;
> -
> - pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
> - pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
> - return t.func(t.arg);
> -}
> -
> /**
> * Zero some stat fields
> *
> @@ -311,13 +286,8 @@ static inline void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
> pair->a_cntr = 0;
> pair->b_cntr = 0;
> pair->exit = 0;
> - if (run_b) {
> - static struct tst_fzsync_run_thread wrap_run_b;
> -
> - wrap_run_b.func = run_b;
> - wrap_run_b.arg = NULL;
> - SAFE_PTHREAD_CREATE(&pair->thread_b, 0, tst_fzsync_thread_wrapper, &wrap_run_b);
> - }
> + if (run_b)
> + SAFE_PTHREAD_CREATE(&pair->thread_b, 0, run_b, 0);
> pair->exec_time_start = (float)tst_timeout_remaining();
> }
> @@ -554,6 +524,7 @@ static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
> * @param our_cntr The counter for the thread we are on
> * @param other_cntr The counter for the thread we are synchronising with
> * @param spins A pointer to the spin counter or NULL
> + * @param exit Exit flag when we need to break out of the wait loop
> *
> * Used by tst_fzsync_pair_wait_a(), tst_fzsync_pair_wait_b(),
> * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
> @@ -566,6 +537,7 @@ static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
> static inline void tst_fzsync_pair_wait(int *our_cntr,
> int *other_cntr,
> int *spins,
> + int *exit,
> bool yield_in_wait)
> {
> if (tst_atomic_inc(other_cntr) == INT_MAX) {
> @@ -578,7 +550,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
> */
> if (yield_in_wait) {
> while (tst_atomic_load(our_cntr) > 0
> - && tst_atomic_load(our_cntr) < INT_MAX) {
> + && tst_atomic_load(our_cntr) < INT_MAX
> + && !tst_atomic_load(exit)) {
> if (spins)
> (*spins)++;
> @@ -586,7 +559,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
> }
> } else {
> while (tst_atomic_load(our_cntr) > 0
> - && tst_atomic_load(our_cntr) < INT_MAX) {
> + && tst_atomic_load(our_cntr) < INT_MAX
> + && !tst_atomic_load(exit)) {
> if (spins)
> (*spins)++;
> }
> @@ -599,10 +573,12 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
> * is restored and we can continue.
> */
> if (yield_in_wait) {
> - while (tst_atomic_load(our_cntr) > 1)
> + while (tst_atomic_load(our_cntr) > 1
> + && !tst_atomic_load(exit))
> sched_yield();
> } else {
> - while (tst_atomic_load(our_cntr) > 1)
> + while (tst_atomic_load(our_cntr) > 1
> + && !tst_atomic_load(exit))
> ;
> }
> } else {
> @@ -612,14 +588,16 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
> */
> if (yield_in_wait) {
> while (tst_atomic_load(our_cntr) <
> - tst_atomic_load(other_cntr)) {
> + tst_atomic_load(other_cntr)
> + && !tst_atomic_load(exit)) {
> if (spins)
> (*spins)++;
> sched_yield();
> }
> } else {
> while (tst_atomic_load(our_cntr) <
> - tst_atomic_load(other_cntr)) {
> + tst_atomic_load(other_cntr)
> + && !tst_atomic_load(exit)) {
> if (spins)
> (*spins)++;
> }
> @@ -635,7 +613,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
> */
> static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
> {
> - tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL, pair->yield_in_wait);
> + tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
> + NULL, &pair->exit, pair->yield_in_wait);
> }
> /**
> @@ -646,7 +625,8 @@ static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
> */
> static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
> {
> - tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL, pair->yield_in_wait);
> + tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
> + NULL, &pair->exit, pair->yield_in_wait);
> }
> /**
> @@ -758,7 +738,8 @@ static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
> static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
> {
> tst_fzsync_time(&pair->a_end);
> - tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins, pair->yield_in_wait);
> + tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
> + &pair->spins, &pair->exit, pair->yield_in_wait);
> }
> /**
> @@ -796,7 +777,8 @@ static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
> static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
> {
> tst_fzsync_time(&pair->b_end);
> - tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins, pair->yield_in_wait);
> + tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
> + &pair->spins, &pair->exit, pair->yield_in_wait);
> }
> /**
> diff --git a/lib/newlib_tests/tst_fuzzy_sync01.c b/lib/newlib_tests/tst_fuzzy_sync01.c
> index ae3ea4e09..5f23a085b 100644
> --- a/lib/newlib_tests/tst_fuzzy_sync01.c
> +++ b/lib/newlib_tests/tst_fuzzy_sync01.c
> @@ -182,15 +182,10 @@ static void *worker(void *v)
> static void run(unsigned int i)
> {
> const struct window a = races[i].a;
> - struct tst_fzsync_run_thread wrap_run_b = {
> - .func = worker,
> - .arg = &i,
> - };
> int cs, ct, r, too_early = 0, critical = 0, too_late = 0;
> tst_fzsync_pair_reset(&pair, NULL);
> - SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
> - &wrap_run_b);
> + SAFE_PTHREAD_CREATE(&pair.thread_b, 0, worker, &i);
> while (tst_fzsync_run_a(&pair)) {
> diff --git a/lib/newlib_tests/tst_fuzzy_sync02.c b/lib/newlib_tests/tst_fuzzy_sync02.c
> index 51075f3c3..c1c2a5327 100644
> --- a/lib/newlib_tests/tst_fuzzy_sync02.c
> +++ b/lib/newlib_tests/tst_fuzzy_sync02.c
> @@ -125,16 +125,11 @@ static void run(unsigned int i)
> {
> const struct window a = to_abs(races[i].a);
> const struct window ad = to_abs(races[i].ad);
> - struct tst_fzsync_run_thread wrap_run_b = {
> - .func = worker,
> - .arg = &i,
> - };
> int critical = 0;
> int now, fin;
> tst_fzsync_pair_reset(&pair, NULL);
> - SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
> - &wrap_run_b);
> + SAFE_PTHREAD_CREATE(&pair.thread_b, 0, worker, &i);
> while (tst_fzsync_run_a(&pair)) {
> c = 0;