[LTP] [PATCH 1/2] lib: add extra calibrated runtime to slow tests
Cyril Hrubis
chrubis@suse.cz
Tue Dec 3 13:18:54 CET 2024
Hi!
> +/*
> + * List of debug-kernel config options that may degrade performance when enabled.
> + */
> +static const char * const tst_kconf_debug_options[][2] = {
> + {"CONFIG_PROVE_LOCKING=y", NULL},
> + {"CONFIG_LOCKDEP=y", NULL},
> + {"CONFIG_DEBUG_SPINLOCK=y", NULL},
> + {"CONFIG_DEBUG_RT_MUTEXES=y", NULL},
> + {"CONFIG_DEBUG_MUTEXES=y", NULL},
> + {"CONFIG_DEBUG_PAGEALLOC=y", NULL},
> + {"CONFIG_KASAN=y", NULL},
> + {"CONFIG_SLUB_RCU_DEBUG=y", NULL},
> + {"CONFIG_TRACE_IRQFLAGS=y", NULL},
> + {"CONFIG_LATENCYTOP=y", NULL},
> + {"CONFIG_DEBUG_NET=y", NULL},
> + {"CONFIG_EXT4_DEBUG=y", NULL},
> + {"CONFIG_QUOTA_DEBUG=y", NULL},
> + {"CONFIG_FAULT_INJECTION=y", NULL},
> + {"CONFIG_DEBUG_OBJECTS=y", NULL},
> + {NULL, NULL}
> +};
> +
> +static inline int tst_kconfig_debug_matches(void)
> +{
> + int i, num = 1;
> +
> + for (i = 0; tst_kconf_debug_options[i][0] != NULL; i++)
> + num += !tst_kconfig_check(tst_kconf_debug_options[i]);
> +
> + return num;
> +}
> +
> #endif /* TST_KCONFIG_H__ */
> diff --git a/include/tst_timer.h b/include/tst_timer.h
> index 6fb940020..268fc8389 100644
> --- a/include/tst_timer.h
> +++ b/include/tst_timer.h
> @@ -17,6 +17,7 @@
> #include <mqueue.h>
> #include <time.h>
> #include "tst_test.h"
> +#include "tst_clocks.h"
> #include "lapi/common_timers.h"
> #include "lapi/posix_types.h"
> #include "lapi/syscalls.h"
> @@ -1074,4 +1075,33 @@ static inline long long tst_timer_elapsed_us(void)
> return tst_timespec_to_us(tst_timer_elapsed());
> }
>
> +#define CALLIBRATE_LOOPS 120000000
> +
> +/*
> + * Measures the time taken by the CPU to perform a specified
> + * number of empty loops for calibration.
> + */
> +static inline int tst_callibrate(void)
> +{
> + int i;
> + struct timespec start, stop;
> + long long diff;
> +
> + for (i = 0; i < CALLIBRATE_LOOPS; i++)
> + __asm__ __volatile__ ("" : "+g" (i) : :);
> +
> + tst_clock_gettime(CLOCK_MONOTONIC_RAW, &start);
> +
> + for (i = 0; i < CALLIBRATE_LOOPS; i++)
> + __asm__ __volatile__ ("" : "+g" (i) : :);
> +
> + tst_clock_gettime(CLOCK_MONOTONIC_RAW, &stop);
> +
> + diff = tst_timespec_diff_us(stop, start);
> +
> + tst_res(TINFO, "CPU did %i loops in %llius", CALLIBRATE_LOOPS, diff);
> +
> + return diff;
> +}
> +
> #endif /* TST_TIMER */
> diff --git a/lib/tst_test.c b/lib/tst_test.c
> index 8db554dea..296683ffb 100644
> --- a/lib/tst_test.c
> +++ b/lib/tst_test.c
> @@ -549,13 +549,16 @@ static void parse_mul(float *mul, const char *env_name, float min, float max)
> static int multiply_runtime(int max_runtime)
> {
> static float runtime_mul = -1;
> + static int extra_runtime = 0;
>
> if (max_runtime <= 0)
> return max_runtime;
>
> parse_mul(&runtime_mul, "LTP_RUNTIME_MUL", 0.0099, 100);
>
> - return max_runtime * runtime_mul;
> + extra_runtime = (tst_callibrate() / 1000) * tst_kconfig_debug_matches();
> +
> + return (max_runtime + extra_runtime) * runtime_mul;
I was hoping for a simpler calculation, something like:
if (any_kconfig_debug_enabled())
max_runtime *= 4;
Or something along these lines. I do not think that the kernel will get
slower with each debug option enabled. And we are not looking for a
tight fit either, so the calibration loop does not make much sense here.
It also makes sense that a debug kernel will be N times slower, where
the exact N would be hard to find out, but in this case we just need a
reasonable upper bound.
And this would work nicely for the starvation test too, since we
calculate a tight fit there first, then we would multiply it by the
"debug kernel slowness" multiplier. There would be no need to modify the
starvation test in that case either (it may make sense to set the
runtime of the test to -1 so that the metadata will have the information
that the test runtime is unbound until we actually execute it though).
--
Cyril Hrubis
chrubis@suse.cz
More information about the ltp
mailing list