[LTP] [PATCH v2] aio_cancel_7-1: Write into a socket
Li Wang
liwang@redhat.com
Fri Aug 22 05:00:02 CEST 2025
Hi Martin,
On Thu, Aug 14, 2025 at 11:02 PM Martin Doucha <mdoucha@suse.cz> wrote:
> The test schedules multiple async writes into a file and then hopes that
> at least one will block long enough that it can be canceled
> before completion. Use a socket pair instead of a file to force async
> writes to block indefinitely and make sure at least one can be canceled.
>
> Also add another case to the final result check because the aio_cancel()
> call may happen between one write finishing and another write starting.
> Then the cancel will be successful and all non-zero task results will be
> ECANCELED.
>
This is a nice rework; switching to a socket pair is a smart way to
make sure aio_write() actually blocks instead of depending on the
timing of writes to a file. A few tiny comments inline below.
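For anyone following along, the trick is that an AF_UNIX datagram
socket whose peer never reads can only absorb a couple of
SO_SNDBUF-sized messages before further writes block. A minimal
standalone sketch of that behavior (illustration only, not part of
the patch; it uses plain write() in non-blocking mode to show where
a blocking aio_write() would get stuck):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>

int main(void)
{
        int fds[2], bufsize, i;
        socklen_t argsize = sizeof(bufsize);
        char *buf;

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) == -1)
                return 1;

        if (getsockopt(fds[0], SOL_SOCKET, SO_SNDBUF, &bufsize,
                       &argsize) == -1)
                return 1;

        /* Socket buffer size is twice the maximum message size */
        bufsize /= 2;
        buf = calloc(1, bufsize);
        if (buf == NULL)
                return 1;

        /* Non-blocking writes succeed until the unread peer buffer is full */
        fcntl(fds[0], F_SETFL, O_NONBLOCK);

        for (i = 0; i < 8; i++) {
                if (write(fds[0], buf, bufsize) == -1) {
                        /* A blocking write (or aio_write) would hang here */
                        printf("write %d would block: %s\n",
                               i, strerror(errno));
                        break;
                }
        }

        free(buf);
        close(fds[0]);
        close(fds[1]);
        return 0;
}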
> Signed-off-by: Martin Doucha <mdoucha@suse.cz>
>
Reviewed-by: Li Wang <liwang@redhat.com>
> ---
>
> Changes since v1: Rebased to current master.
>
> .../conformance/interfaces/aio_cancel/7-1.c | 81 ++++++++++++-------
> 1 file changed, 54 insertions(+), 27 deletions(-)
>
> diff --git a/testcases/open_posix_testsuite/conformance/interfaces/aio_cancel/7-1.c b/testcases/open_posix_testsuite/conformance/interfaces/aio_cancel/7-1.c
> index 34b263245..08f25c0b8 100644
> --- a/testcases/open_posix_testsuite/conformance/interfaces/aio_cancel/7-1.c
> +++ b/testcases/open_posix_testsuite/conformance/interfaces/aio_cancel/7-1.c
> @@ -29,71 +29,82 @@
> #include <sys/types.h>
> #include <unistd.h>
> #include <sys/stat.h>
> -#include <fcntl.h>
> #include <string.h>
> #include <errno.h>
> #include <stdlib.h>
> #include <aio.h>
> +#include <sys/socket.h>
>
> #include "posixtest.h"
> -#include "tempfile.h"
>
> #define TNAME "aio_cancel/7-1.c"
>
> -#define BUF_NB 128
> -#define BUF_SIZE 1024
> +#define BUF_NB 8
>
> int main(void)
> {
> - char tmpfname[PATH_MAX];
> - int fd;
> + int fds[2];
> struct aiocb *aiocb[BUF_NB];
> int i;
> int in_progress;
> int gret;
> + int bufsize;
>
> + unsigned int argsize = sizeof(bufsize);
>
getsockopt() expects a 'socklen_t *' for optlen, so we may need:
socklen_t argsize = sizeof(bufsize);
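(The call itself would not need any change, since it already passes
&argsize; with the declaration above it would simply stay as:

        gret = getsockopt(fds[0], SOL_SOCKET, SO_SNDBUF, &bufsize, &argsize);

only the declared type of argsize differs.)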
> if (sysconf(_SC_ASYNCHRONOUS_IO) < 200112L)
> return PTS_UNSUPPORTED;
>
> - PTS_GET_TMP_FILENAME(tmpfname, "pts_aio_cancel_7_1");
> - unlink(tmpfname);
> - fd = open(tmpfname, O_CREAT | O_RDWR | O_EXCL, S_IRUSR | S_IWUSR);
> - if (fd == -1) {
> - printf(TNAME " Error at open(): %s\n", strerror(errno));
> + gret = socketpair(AF_UNIX, SOCK_DGRAM, 0, fds);
> + if (gret == -1) {
> + printf(TNAME " Error creating sockets(): %s\n",
> + strerror(errno));
> return PTS_UNRESOLVED;
> }
>
> - unlink(tmpfname);
> + gret = getsockopt(fds[0], SOL_SOCKET, SO_SNDBUF, &bufsize, &argsize);
> + if (gret == -1) {
> + printf(TNAME " Error reading socket buffer size: %s\n",
> + strerror(errno));
>
> + close(fds[0]);
> + close(fds[1]);
> + return PTS_UNRESOLVED;
>
Maybe we could wrap such cleanup code in a cleanup_and_return() helper
to avoid duplicating it in so many places:

static int cleanup_and_return(int fds[2], int ret)
{
        close(fds[0]);
        close(fds[1]);
        return ret;
}

and then the error/exit paths become:

        return cleanup_and_return(fds, PTS_UNRESOLVED);
        return cleanup_and_return(fds, PTS_FAIL);
        return cleanup_and_return(fds, PTS_PASS);
> + }
> +
> + /* Socket buffer size is twice the maximum message size */
> + bufsize /= 2;
>
> /* create AIO req */
> for (i = 0; i < BUF_NB; i++) {
> aiocb[i] = calloc(1, sizeof(struct aiocb));
>
> if (aiocb[i] == NULL) {
> - printf(TNAME " Error at malloc(): %s\n",
> + printf(TNAME " Error at calloc(): %s\n",
> strerror(errno));
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_UNRESOLVED;
> }
>
> - aiocb[i]->aio_fildes = fd;
> - aiocb[i]->aio_buf = malloc(BUF_SIZE);
> + aiocb[i]->aio_fildes = fds[0];
> + aiocb[i]->aio_buf = malloc(bufsize);
>
> if (aiocb[i]->aio_buf == NULL) {
> printf(TNAME " Error at malloc(): %s\n",
> strerror(errno));
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_UNRESOLVED;
> }
>
> - aiocb[i]->aio_nbytes = BUF_SIZE;
> + aiocb[i]->aio_nbytes = bufsize;
> aiocb[i]->aio_offset = 0;
> aiocb[i]->aio_sigevent.sigev_notify = SIGEV_NONE;
>
> if (aio_write(aiocb[i]) == -1) {
> printf(TNAME " loop %d: Error at aio_write(): %s\n",
> i, strerror(errno));
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_FAIL;
> }
> }
> @@ -101,10 +112,11 @@ int main(void)
> /* try to cancel all
> * we hope to have enough time to cancel at least one
> */
> - gret = aio_cancel(fd, NULL);
> + gret = aio_cancel(fds[0], NULL);
> if (gret == -1) {
> printf(TNAME " Error at aio_cancel(): %s\n",
> strerror(errno));
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_FAIL;
> }
>
> @@ -117,9 +129,9 @@ int main(void)
> case -1:
> printf(TNAME " Error at aio_error(): %s\n",
> strerror(errno));
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_FAIL;
> - break;
> case EINPROGRESS:
> /* at this point, all operations should be:
> * canceled
> @@ -130,12 +142,26 @@ int main(void)
> printf(TNAME
> " Error at aio_error():
> %s\n",
> strerror(errno));
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_FAIL;
> }
>
> in_progress = 1;
> break;
> + case ECANCELED:
> + /* aio_cancel() happened between one
> + * write finishing and another starting.
> + * aio_cancel() returned AIO_CANCELED
> + * and the first non-zero result is ECANCELED.
> + */
> + if (gret == AIO_CANCELED) {
> + printf("Test PASSED\n");
> + close(fds[0]);
> + close(fds[1]);
> + return PTS_PASS;
> + }
> + break;
> case 0:
> /* we seek one not canceled and check why.
> * (perhaps) it has not been canceled
> @@ -144,7 +170,8 @@ int main(void)
> */
> if (gret == AIO_NOTCANCELED) {
> printf("Test PASSED\n");
> - close(fd);
> + close(fds[0]);
> + close(fds[1]);
> return PTS_PASS;
> }
> break;
> @@ -152,7 +179,7 @@ int main(void)
> }
> } while (in_progress);
>
> - close(fd);
> -
> + close(fds[0]);
> + close(fds[1]);
> return PTS_UNRESOLVED;
> }
> --
> 2.50.1
--
Regards,
Li Wang