[LTP] [PATCH 3/3] io_uring: Refactor io_uring01 to use common code
Sachin Sant
sachinp@linux.ibm.com
Fri Mar 20 13:47:42 CET 2026
No functional change.
Refactor the io_uring01 test case to:
- use common definitions from io_uring_common.h
- remove duplicate structure definitions
- replace manual munmap/close calls with the io_uring_cleanup_queue()
helper function
Signed-off-by: Sachin Sant <sachinp@linux.ibm.com>
---
.../kernel/syscalls/io_uring/io_uring01.c | 111 ++++++------------
1 file changed, 35 insertions(+), 76 deletions(-)
diff --git a/testcases/kernel/syscalls/io_uring/io_uring01.c b/testcases/kernel/syscalls/io_uring/io_uring01.c
index ab1ec00d6..368c1ed15 100644
--- a/testcases/kernel/syscalls/io_uring/io_uring01.c
+++ b/testcases/kernel/syscalls/io_uring/io_uring01.c
@@ -11,13 +11,7 @@
* registered in the kernel for long term operation using io_uring_register().
* This tests initiates I/O operations with the help of io_uring_enter().
*/
-#include <stdlib.h>
-#include <errno.h>
-#include <string.h>
-#include <fcntl.h>
-#include "config.h"
-#include "tst_test.h"
-#include "lapi/io_uring.h"
+#include "io_uring_common.h"
#define TEST_FILE "test_file"
@@ -32,42 +26,11 @@ static struct tcase {
{0, IORING_REGISTER_BUFFERS, IORING_OP_READ_FIXED},
};
-struct io_sq_ring {
- unsigned int *head;
- unsigned int *tail;
- unsigned int *ring_mask;
- unsigned int *ring_entries;
- unsigned int *flags;
- unsigned int *array;
-};
-
-struct io_cq_ring {
- unsigned int *head;
- unsigned int *tail;
- unsigned int *ring_mask;
- unsigned int *ring_entries;
- struct io_uring_cqe *cqes;
-};
-
-struct submitter {
- int ring_fd;
- struct io_sq_ring sq_ring;
- struct io_uring_sqe *sqes;
- struct io_cq_ring cq_ring;
-};
-
-static struct submitter sub_ring;
-static struct submitter *s = &sub_ring;
+static struct io_uring_submit s;
static sigset_t sig;
static struct iovec *iov;
-
-static void *sptr;
-static size_t sptr_size;
-static void *cptr;
-static size_t cptr_size;
-
-static int setup_io_uring_test(struct submitter *s, struct tcase *tc)
+static int setup_io_uring_test(struct io_uring_submit *s, struct tcase *tc)
{
struct io_sq_ring *sring = &s->sq_ring;
struct io_cq_ring *cring = &s->cq_ring;
@@ -83,43 +46,42 @@ static int setup_io_uring_test(struct submitter *s, struct tcase *tc)
return 1;
}
- sptr_size = p.sq_off.array + p.sq_entries * sizeof(unsigned int);
+ s->sq_ptr_size = p.sq_off.array + p.sq_entries * sizeof(unsigned int);
/* Submission queue ring buffer mapping */
- sptr = SAFE_MMAP(0, sptr_size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- s->ring_fd, IORING_OFF_SQ_RING);
+ s->sq_ptr = SAFE_MMAP(0, s->sq_ptr_size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ s->ring_fd, IORING_OFF_SQ_RING);
/* Save global submission queue struct info */
- sring->head = sptr + p.sq_off.head;
- sring->tail = sptr + p.sq_off.tail;
- sring->ring_mask = sptr + p.sq_off.ring_mask;
- sring->ring_entries = sptr + p.sq_off.ring_entries;
- sring->flags = sptr + p.sq_off.flags;
- sring->array = sptr + p.sq_off.array;
+ sring->head = s->sq_ptr + p.sq_off.head;
+ sring->tail = s->sq_ptr + p.sq_off.tail;
+ sring->ring_mask = s->sq_ptr + p.sq_off.ring_mask;
+ sring->ring_entries = s->sq_ptr + p.sq_off.ring_entries;
+ sring->flags = s->sq_ptr + p.sq_off.flags;
+ sring->array = s->sq_ptr + p.sq_off.array;
/* Submission queue entries ring buffer mapping */
- s->sqes = SAFE_MMAP(0, p.sq_entries *
- sizeof(struct io_uring_sqe),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- s->ring_fd, IORING_OFF_SQES);
+ s->sqes = SAFE_MMAP(0, p.sq_entries * sizeof(struct io_uring_sqe),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ s->ring_fd, IORING_OFF_SQES);
- cptr_size = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
+ s->cq_ptr_size = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
/* Completion queue ring buffer mapping */
- cptr = SAFE_MMAP(0, cptr_size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- s->ring_fd, IORING_OFF_CQ_RING);
+ s->cq_ptr = SAFE_MMAP(0, s->cq_ptr_size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ s->ring_fd, IORING_OFF_CQ_RING);
/* Save global completion queue struct info */
- cring->head = cptr + p.cq_off.head;
- cring->tail = cptr + p.cq_off.tail;
- cring->ring_mask = cptr + p.cq_off.ring_mask;
- cring->ring_entries = cptr + p.cq_off.ring_entries;
- cring->cqes = cptr + p.cq_off.cqes;
+ cring->head = s->cq_ptr + p.cq_off.head;
+ cring->tail = s->cq_ptr + p.cq_off.tail;
+ cring->ring_mask = s->cq_ptr + p.cq_off.ring_mask;
+ cring->ring_entries = s->cq_ptr + p.cq_off.ring_entries;
+ cring->cqes = s->cq_ptr + p.cq_off.cqes;
return 0;
}
@@ -139,7 +101,7 @@ static void check_buffer(char *buffer, size_t len)
tst_res(TPASS, "Buffer filled in correctly");
}
-static void drain_uring_cq(struct submitter *s, unsigned int exp_events)
+static void drain_uring_cq(struct io_uring_submit *s, unsigned int exp_events)
{
struct io_cq_ring *cring = &s->cq_ring;
unsigned int head = *cring->head;
@@ -175,7 +137,7 @@ static void drain_uring_cq(struct submitter *s, unsigned int exp_events)
events, exp_events);
}
-static int submit_to_uring_sq(struct submitter *s, struct tcase *tc)
+static int submit_to_uring_sq(struct io_uring_submit *s, struct tcase *tc)
{
unsigned int index = 0, tail = 0, next_tail = 0;
struct io_sq_ring *sring = &s->sq_ring;
@@ -229,23 +191,20 @@ static int submit_to_uring_sq(struct submitter *s, struct tcase *tc)
static void cleanup_io_uring_test(void)
{
- io_uring_register(s->ring_fd, IORING_UNREGISTER_BUFFERS,
+ io_uring_register(s.ring_fd, IORING_UNREGISTER_BUFFERS,
NULL, QUEUE_DEPTH);
- SAFE_MUNMAP(s->sqes, sizeof(struct io_uring_sqe));
- SAFE_MUNMAP(cptr, cptr_size);
- SAFE_MUNMAP(sptr, sptr_size);
- SAFE_CLOSE(s->ring_fd);
+ io_uring_cleanup_queue(&s, QUEUE_DEPTH);
}
static void run(unsigned int n)
{
struct tcase *tc = &tcases[n];
- if (setup_io_uring_test(s, tc))
+ if (setup_io_uring_test(&s, tc))
return;
- if (!submit_to_uring_sq(s, tc))
- drain_uring_cq(s, 1);
+ if (!submit_to_uring_sq(&s, tc))
+ drain_uring_cq(&s, 1);
cleanup_io_uring_test();
}
--
2.39.1
More information about the ltp
mailing list