The change refactors the CQE caching mechanism; it doesn't alter the implementation's behaviour. Making cqe_cached and cqe_sentinel indices into the CQE array instead of pointers allows easier bookkeeping in upcoming patches that enable compat handling at the io_uring_cqe level.
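
For illustration, a minimal sketch of the two representations (field
names as in struct io_ring_ctx; the comments are mine):

	/* Before: the cached CQE range is tracked with raw pointers. */
	struct io_uring_cqe	*cqe_cached;	/* next free CQE */
	struct io_uring_cqe	*cqe_sentinel;	/* one past the cached range */

	/* After: the same range is tracked as indices into the CQE array,
	 * so the bookkeeping no longer depends on
	 * sizeof(struct io_uring_cqe).
	 */
	unsigned int		cqe_cached;	/* index of next free CQE */
	unsigned int		cqe_sentinel;	/* one past the cached range */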
Signed-off-by: Tudor Cretu <tudor.cretu@arm.com>
---
 include/linux/io_uring_types.h | 4 ++--
 io_uring/io_uring.c            | 2 +-
 io_uring/io_uring.h            | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 440179029a8f0..3d14c6feb51b6 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -277,8 +277,8 @@ struct io_ring_ctx {
 		 * We cache a range of free CQEs we can use, once exhausted it
 		 * should go through a slower range setup, see __io_get_cqe()
 		 */
-		struct io_uring_cqe	*cqe_cached;
-		struct io_uring_cqe	*cqe_sentinel;
+		unsigned int		cqe_cached;
+		unsigned int		cqe_sentinel;
 
 		unsigned		cached_cq_tail;
 		unsigned		cq_entries;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 707229ae04dc8..fb6d07e1e7358 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -767,7 +767,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 		len <<= 1;
 	}
 
-	ctx->cqe_cached = &ctx->cqes[off];
+	ctx->cqe_cached = off;
 	ctx->cqe_sentinel = ctx->cqe_cached + len;
 
 	ctx->cached_cq_tail++;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 50bc3af449534..6d9720dd8f469 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -97,7 +97,7 @@ static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
-		struct io_uring_cqe *cqe = ctx->cqe_cached;
+		struct io_uring_cqe *cqe = &ctx->cqes[ctx->cqe_cached];
 
 		ctx->cached_cq_tail++;
 		ctx->cqe_cached++;
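
(Not part of the patch: a self-contained sketch of the index-based fast
path that io_get_cqe_overflow() implements after this change. The
struct and function names below are simplified stand-ins, and where the
sketch returns NULL the real code refills the cached range via
__io_get_cqe().)

	#include <stdio.h>

	struct cqe {
		unsigned long long user_data;
		int res;
		unsigned int flags;
	};

	struct ctx {
		struct cqe *cqes;		/* base of the CQ ring array */
		unsigned int cqe_cached;	/* index of the next free CQE */
		unsigned int cqe_sentinel;	/* one past the cached range */
		unsigned int cached_cq_tail;
	};

	/* Fast path: hand out the next CQE from the cached index range. */
	static struct cqe *get_cqe(struct ctx *ctx)
	{
		if (ctx->cqe_cached < ctx->cqe_sentinel) {
			struct cqe *cqe = &ctx->cqes[ctx->cqe_cached];

			ctx->cached_cq_tail++;
			ctx->cqe_cached++;
			return cqe;
		}
		return NULL;	/* stand-in for the __io_get_cqe() slow path */
	}

	int main(void)
	{
		struct cqe ring[4] = {{ 0 }};
		struct ctx ctx = {
			.cqes = ring, .cqe_cached = 0, .cqe_sentinel = 4,
		};

		while (get_cqe(&ctx))
			;
		printf("handed out %u CQEs\n", ctx.cached_cq_tail); /* 4 */
		return 0;
	}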