author      Dana Powers <dana.powers@gmail.com>  2017-06-17 08:46:54 -0700
committer   Dana Powers <dana.powers@gmail.com>  2017-06-18 22:43:21 -0700
commit      c5f1c6901f0ff1b7867c80691134d535ac645559 (patch)
tree        fbbbc91941121390e40063fc271d44c73b445e5b
parent      b4f71229d000b01c5d7b8054ce5eca5b69177bb1 (diff)
download    kafka-python-c5f1c6901f0ff1b7867c80691134d535ac645559.tar.gz
Increase max_buffer_size for test_large_messages
-rw-r--r--  test/test_consumer_integration.py  10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/test/test_consumer_integration.py b/test/test_consumer_integration.py
index 045e81e..3c5fbd7 100644
--- a/test/test_consumer_integration.py
+++ b/test/test_consumer_integration.py
@@ -352,8 +352,14 @@ class TestConsumerIntegration(KafkaIntegrationTestCase):
         # Produce 10 messages that are large (bigger than default fetch size)
         large_messages = self.send_messages(0, [ random_string(5000) for x in range(10) ])
 
-        # Consumer should still get all of them
-        consumer = self.consumer()
+        # Brokers prior to 0.11 will return the next message
+        # if it is smaller than max_bytes (called buffer_size in SimpleConsumer)
+        # Brokers 0.11 and later that store messages in v2 format
+        # internally will return the next message only if the
+        # full MessageSet is smaller than max_bytes.
+        # For that reason, we set the max buffer size to a little more
+        # than the size of all large messages combined
+        consumer = self.consumer(max_buffer_size=60000)
 
         expected_messages = set(small_messages + large_messages)
         actual_messages = set([ x.message.value for x in consumer ])
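
For readers following the reasoning in the new comment: a quick back-of-the-envelope sketch (not part of the commit; the constant names are illustrative) of why 60000 comfortably exceeds the combined payload of the large messages the test produces.

    # Rough sizing sketch: the test sends 10 messages of 5000 random
    # characters each, so the combined payload is 50000 bytes. Setting
    # max_buffer_size=60000 leaves ~10000 bytes of headroom for per-message
    # and MessageSet overhead. Constant names here are illustrative only.
    NUM_LARGE_MESSAGES = 10      # matches range(10) in the test
    PAYLOAD_BYTES = 5000         # matches random_string(5000)
    MAX_BUFFER_SIZE = 60000      # value passed to self.consumer() in the diff

    combined_payload = NUM_LARGE_MESSAGES * PAYLOAD_BYTES   # 50000 bytes
    headroom = MAX_BUFFER_SIZE - combined_payload            # 10000 bytes

    assert combined_payload < MAX_BUFFER_SIZE
    print("payload=%d buffer=%d headroom=%d"
          % (combined_payload, MAX_BUFFER_SIZE, headroom))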