author	Jakub Kicinski <kuba@kernel.org>	2023-03-09 22:18:59 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2023-03-09 22:22:11 -0800
commit	d0ddf5065ffef45f8fce4001abe0206081c7ff10 (patch)
tree	ea83817cbe9fc25261eae87b85afd9fe086f479e /io_uring/poll.c
parent	db47fa2e4cbf180a39d8e6d6170962bd7d82e52d (diff)
parent	44889ba56cbb3d51154660ccd15818bc77276696 (diff)
download	linux-d0ddf5065ffef45f8fce4001abe0206081c7ff10.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Documentation/bpf/bpf_devel_QA.rst
  b7abcd9c656b ("bpf, doc: Link to submitting-patches.rst for general patch submission info")
  d56b0c461d19 ("bpf, docs: Fix link to netdev-FAQ target")
https://lore.kernel.org/all/20230307095812.236eb1be@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'io_uring/poll.c')
-rw-r--r--	io_uring/poll.c | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 8339a92b4510..795facbd0e9f 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -51,6 +51,9 @@ struct io_poll_table {
 
 #define IO_WQE_F_DOUBLE		1
 
+static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+			void *key);
+
 static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
 {
 	unsigned long priv = (unsigned long)wqe->private;
@@ -164,15 +167,14 @@ static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
 	}
 }
 
-static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
-			      wait_queue_func_t wake_func)
+static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 {
 	poll->head = NULL;
 #define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
 	/* mask in events that we always want/need */
 	poll->events = events | IO_POLL_UNMASK;
 	INIT_LIST_HEAD(&poll->wait.entry);
-	init_waitqueue_func_entry(&poll->wait, wake_func);
+	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 }
 
 static inline void io_poll_remove_entry(struct io_poll *poll)
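
Note: with this change the wake callback is hard-wired; every poll entry now wakes through io_poll_wake(), forward-declared at the top of the file. For context, init_waitqueue_func_entry() simply stores the callback in the wait entry; a minimal sketch, paraphrased from include/linux/wait.h (not part of this diff):

static inline void init_waitqueue_func_entry(struct wait_queue_entry *wq_entry,
					     wait_queue_func_t func)
{
	/* a later wake_up(head) invokes wq_entry->func(wq_entry, mode, flags, key) */
	wq_entry->flags = 0;
	wq_entry->private = NULL;
	wq_entry->func = func;
}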
@@ -508,7 +510,7 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 
 		/* mark as double wq entry */
 		wqe_private |= IO_WQE_F_DOUBLE;
-		io_init_poll_iocb(poll, first->events, first->wait.func);
+		io_init_poll_iocb(poll, first->events);
 		if (!io_poll_double_prepare(req)) {
 			/* the request is completing, just back off */
 			kfree(poll);
@@ -569,7 +571,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
 	INIT_HLIST_NODE(&req->hash_node);
 	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
-	io_init_poll_iocb(poll, mask, io_poll_wake);
+	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -650,6 +652,14 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
+/*
+ * We can't reliably detect loops in repeated poll triggers and issue
+ * subsequently failing. But rather than fail these immediately, allow a
+ * certain amount of retries before we give up. Given that this condition
+ * should _rarely_ trigger even once, we should be fine with a larger value.
+ */
+#define APOLL_MAX_RETRY	128
+
 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 					     unsigned issue_flags)
 {
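
The retries counter initialized below lives in struct io_poll; the hunk adding it is in io_uring/poll.h and therefore falls outside this diffstat-limited view. A sketch of the resulting layout, assuming the definition from this kernel era:

struct io_poll {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	int				retries;	/* reset to APOLL_MAX_RETRY on (re)allocation */
	struct wait_queue_entry		wait;
};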
@@ -665,14 +675,18 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		entry = io_alloc_cache_get(&ctx->apoll_cache);
 		if (entry == NULL)
 			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
+		apoll->poll.retries = APOLL_MAX_RETRY;
 	} else {
 alloc_apoll:
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
 			return NULL;
+		apoll->poll.retries = APOLL_MAX_RETRY;
 	}
 	apoll->double_poll = NULL;
 	req->apoll = apoll;
+	if (unlikely(!--apoll->poll.retries))
+		return NULL;
 	return apoll;
 }
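
Note that retries is reset only on the cache-hit and kmalloc paths; when REQ_F_POLLED is set the existing apoll is reused and merely decremented. An illustrative trace of how the budget plays out, not literal kernel code:

/*
 * 1st arm:   fresh apoll, retries = 128; --retries -> 127, apoll returned
 * re-arms:   apoll reused (REQ_F_POLLED), no reset; --retries -> 126, 125, ...
 * 128th arm: --retries -> 0, io_req_alloc_apoll() returns NULL and the
 *            caller gives up on arming poll for this request
 */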
@@ -694,8 +708,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 		return IO_APOLL_ABORTED;
 	if (!file_can_poll(req->file))
 		return IO_APOLL_ABORTED;
-	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
-		return IO_APOLL_ABORTED;
 
 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
 		mask |= EPOLLONESHOT;
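
With the unconditional REQ_F_POLLED abort removed, a spuriously retriggering request now fails to re-arm only once its retry budget is exhausted. The caller side in io_arm_poll_handler(), a few lines past this hunk (paraphrased from this kernel era), reads:

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;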