Diffstat (limited to 'kafka')
-rw-r--r--  kafka/client_async.py     | 12
-rw-r--r--  kafka/consumer/fetcher.py |  3
-rw-r--r--  kafka/consumer/group.py   |  6
-rw-r--r--  kafka/producer/sender.py  |  2
4 files changed, 9 insertions(+), 14 deletions(-)
diff --git a/kafka/client_async.py b/kafka/client_async.py
index ecd2cea..4e4e835 100644
--- a/kafka/client_async.py
+++ b/kafka/client_async.py
@@ -495,7 +495,7 @@ class KafkaClient(object):
return self._conns[node_id].send(request)
- def poll(self, timeout_ms=None, future=None, sleep=True, delayed_tasks=True):
+ def poll(self, timeout_ms=None, future=None, delayed_tasks=True):
"""Try to read and write to sockets.
This method will also attempt to complete node connections, refresh
@@ -507,9 +507,6 @@ class KafkaClient(object):
timeout will be the minimum of timeout, request timeout and
metadata timeout. Default: request_timeout_ms
future (Future, optional): if provided, blocks until future.is_done
- sleep (bool): if True and there is nothing to do (no connections
- or requests in flight), will sleep for duration timeout before
- returning empty results. Default: False.
Returns:
list: responses received (can be empty)
@@ -553,7 +550,7 @@ class KafkaClient(object):
self.config['request_timeout_ms'])
timeout = max(0, timeout / 1000.0) # avoid negative timeouts
- responses.extend(self._poll(timeout, sleep=sleep))
+ responses.extend(self._poll(timeout))
# If all we had was a timeout (future is None) - only do one poll
# If we do have a future, we keep looping until it is done
@@ -562,10 +559,7 @@ class KafkaClient(object):
return responses
- def _poll(self, timeout, sleep=True):
- # select on reads across all connected sockets, blocking up to timeout
- assert self.in_flight_request_count() > 0 or self._connecting or sleep
-
+ def _poll(self, timeout):
responses = []
processed = set()
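Net effect of the client_async.py hunks: `KafkaClient.poll()` no longer accepts a `sleep` flag and always blocks for up to the computed timeout. A minimal before/after sketch (the broker address is an assumption, not part of this commit):

```python
from kafka.client_async import KafkaClient

client = KafkaClient(bootstrap_servers='localhost:9092')

# Before this commit: client.poll(timeout_ms=500, sleep=True)
# After: no sleep kwarg; poll() blocks up to timeout_ms whether or not
# any requests are in flight.
responses = client.poll(timeout_ms=500)
print(responses)  # list of responses received (can be empty)
```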
diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index c0d6075..10ed187 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -275,8 +275,7 @@ class Fetcher(six.Iterator):
if future.exception.invalid_metadata:
refresh_future = self._client.cluster.request_update()
- self._client.poll(
- future=refresh_future, sleep=True, timeout_ms=remaining_ms)
+ self._client.poll(future=refresh_future, timeout_ms=remaining_ms)
else:
time.sleep(self.config['retry_backoff_ms'] / 1000.0)
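The fetcher call site keeps the same pattern minus the dropped kwarg: block on a metadata-refresh future, bounded by the remaining time budget. A sketch of that pattern (the `remaining_ms` value and broker address are hypothetical):

```python
from kafka.client_async import KafkaClient

def await_metadata_refresh(client, remaining_ms):
    # Request a cluster metadata update, then block until the returned
    # future completes or the time budget expires.
    refresh_future = client.cluster.request_update()
    client.poll(future=refresh_future, timeout_ms=remaining_ms)
    return refresh_future.is_done

client = KafkaClient(bootstrap_servers='localhost:9092')
print(await_metadata_refresh(client, remaining_ms=1000))
```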
diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py
index 54a3711..2de254d 100644
--- a/kafka/consumer/group.py
+++ b/kafka/consumer/group.py
@@ -613,7 +613,7 @@ class KafkaConsumer(six.Iterator):
# Send any new fetches (won't resend pending fetches)
self._fetcher.send_fetches()
- self._client.poll(timeout_ms=timeout_ms, sleep=True)
+ self._client.poll(timeout_ms=timeout_ms)
records, _ = self._fetcher.fetched_records(max_records)
return records
@@ -1019,7 +1019,7 @@ class KafkaConsumer(six.Iterator):
poll_ms = 1000 * (self._consumer_timeout - time.time())
if not self._fetcher.in_flight_fetches():
poll_ms = 0
- self._client.poll(timeout_ms=poll_ms, sleep=True)
+ self._client.poll(timeout_ms=poll_ms)
# We need to make sure we at least keep up with scheduled tasks,
# like heartbeats, auto-commits, and metadata refreshes
@@ -1045,6 +1045,8 @@ class KafkaConsumer(six.Iterator):
if time.time() > timeout_at:
log.debug("internal iterator timeout - breaking for poll")
break
+ if self._client.in_flight_request_count():
+ self._client.poll(timeout_ms=0)
# An else block on a for loop only executes if there was no break
# so this should only be called on a StopIteration from the fetcher
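The two added lines in the iterator compensate for the removed sleep semantics: when requests are in flight, a zero-timeout poll reaps completed responses without blocking. A standalone sketch of that guard (broker address assumed):

```python
from kafka.client_async import KafkaClient

client = KafkaClient(bootstrap_servers='localhost:9092')

# Only pay for the select() call when there is something to reap;
# timeout_ms=0 means this poll never blocks the iterator.
if client.in_flight_request_count():
    client.poll(timeout_ms=0)
```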
diff --git a/kafka/producer/sender.py b/kafka/producer/sender.py
index 2974faf..ad59050 100644
--- a/kafka/producer/sender.py
+++ b/kafka/producer/sender.py
@@ -156,7 +156,7 @@ class Sender(threading.Thread):
# difference between now and its linger expiry time; otherwise the
# select time will be the time difference between now and the
# metadata expiry time
- self._client.poll(poll_timeout_ms, sleep=True)
+ self._client.poll(poll_timeout_ms)
def initiate_close(self):
"""Start closing the sender (won't complete until all data is sent)."""