author    Pavel Begunkov <asml.silence@gmail.com>  2023-02-22 14:36:50 +0000
committer Jens Axboe <axboe@kernel.dk>             2023-02-22 09:57:23 -0700
commit    b000ae0ec2d709046ac1a3c5722fea417f8a067e (patch)
tree      91a0bf402c0a9b2eb5f5f9ba4471f3af3de956c6 /io_uring/rsrc.c
parent    edd478269640b360c6f301f2baa04abdda563ef3 (diff)
download  linux-b000ae0ec2d709046ac1a3c5722fea417f8a067e.tar.gz
io_uring/rsrc: optimise single entry advance
Iterating within the first bvec entry should be essentially free, but we
use iov_iter_advance() for that, which shows up in benchmark profiles
taking up to 0.5% of CPU. Replace it with a hand coded version.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/rsrc.c')
-rw-r--r--  io_uring/rsrc.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 53845e496881..ebbd2cea7582 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1364,7 +1364,10 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 		const struct bio_vec *bvec = imu->bvec;
 
 		if (offset <= bvec->bv_len) {
-			iov_iter_advance(iter, offset);
+			iter->bvec = bvec;
+			iter->nr_segs = bvec->bv_len;
+			iter->count -= offset;
+			iter->iov_offset = offset;
 		} else {
 			unsigned long seg_skip;
 
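
What the patch hand-codes is a general pattern: when an offset is known to
land inside the first segment of a segment-array iterator, the generic
advance loop can be skipped and the iterator fields set directly. Below is
a minimal, self-contained userspace sketch of that idea; the types and
helpers (seg, seg_iter, iter_advance_slow) are invented stand-ins for
illustration, not the kernel's bio_vec/iov_iter API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for one entry of a segment array (cf. bio_vec). */
struct seg {
	size_t len;               /* bytes in this segment */
};

/* Simplified stand-in for a segment iterator (cf. iov_iter over bvecs). */
struct seg_iter {
	const struct seg *segs;   /* current segment */
	size_t nr_segs;           /* segments remaining */
	size_t iov_offset;        /* byte offset into segs[0] */
	size_t count;             /* bytes remaining in the iterator */
};

/* Generic advance: walks segments one by one, in the spirit of
 * iov_iter_advance(). Correct for any offset <= it->count. */
static void iter_advance_slow(struct seg_iter *it, size_t off)
{
	it->count -= off;
	while (off && off >= it->segs[0].len - it->iov_offset) {
		off -= it->segs[0].len - it->iov_offset;
		it->segs++;
		it->nr_segs--;
		it->iov_offset = 0;
	}
	it->iov_offset += off;
}

/* Fast path in the spirit of the patch: if the offset stays within the
 * first segment, poke the iterator fields directly instead of looping.
 * (The kernel version also re-points the iterator at the buffer's bvec
 * array, since it is setting the iterator up from scratch.) */
static void iter_advance(struct seg_iter *it, size_t off)
{
	if (off <= it->segs[0].len) {
		it->count -= off;
		it->iov_offset = off;   /* assumes iov_offset was 0 */
	} else {
		iter_advance_slow(it, off);
	}
}

int main(void)
{
	struct seg segs[2] = { { .len = 4096 }, { .len = 4096 } };
	struct seg_iter it = { .segs = segs, .nr_segs = 2,
			       .iov_offset = 0, .count = 8192 };

	iter_advance(&it, 100);   /* lands inside segs[0]: fast path */
	assert(it.count == 8092 && it.iov_offset == 100 && it.nr_segs == 2);
	printf("count=%zu iov_offset=%zu nr_segs=%zu\n",
	       it.count, it.iov_offset, it.nr_segs);
	return 0;
}

Built with a plain `cc sketch.c`, the assert checks that the fast path
leaves the segment bookkeeping untouched while count and iov_offset
reflect the advance, which is exactly the work the loop-based advance
spends its cycles rediscovering.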