author    Dana Powers <dana.powers@gmail.com>  2016-05-22 12:41:17 -0700
committer Dana Powers <dana.powers@gmail.com>  2016-05-22 12:41:17 -0700
commit    96530f6a9c4a31d23b069ba162dba6cf45a5efd0 (patch)
tree      e9cc1eddfd4f03981762127d035c58db9c9a5269 /test
parent    7941a2ac7ec6663f08c6291d92746eae9f792916 (diff)
download  kafka-python-96530f6a9c4a31d23b069ba162dba6cf45a5efd0.tar.gz
Use Fetch/Produce API v2 for brokers >= 0.10 (uses message format v1) (#694)
Diffstat (limited to 'test')
-rw-r--r--  test/test_fetcher.py  15
-rw-r--r--  test/test_sender.py   47
2 files changed, 61 insertions(+), 1 deletion(-)
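The commit message describes the behavior both new tests pin down: the client maps the configured broker api_version to a Fetch/Produce protocol version and builds requests with the matching versioned class. A minimal sketch of that mapping, using kafka-python's versioned-request indexing but with an illustrative helper name (_fetch_version is not part of the library):

from kafka.protocol.fetch import FetchRequest

def _fetch_version(api_version):
    # Illustrative mapping mirroring the parametrized cases below:
    # brokers >= 0.10 speak Fetch v2 (message format v1), 0.9 speaks v1,
    # anything older falls back to v0.
    if api_version >= (0, 10):
        return 2
    elif api_version >= (0, 9):
        return 1
    return 0

# FetchRequest is a list of versioned request classes, so indexing by the
# negotiated version selects the right struct (v2 here, assuming a
# kafka-python version that defines it, as this commit's tests require).
request_class = FetchRequest[_fetch_version((0, 10))]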
diff --git a/test/test_fetcher.py b/test/test_fetcher.py
index bf4a3a9..7e529bc 100644
--- a/test/test_fetcher.py
+++ b/test/test_fetcher.py
@@ -15,7 +15,7 @@ from kafka.structs import TopicPartition, OffsetAndMetadata
@pytest.fixture
def client(mocker):
- return mocker.Mock(spec=KafkaClient)
+ return mocker.Mock(spec=KafkaClient(bootstrap_servers=[]))
@pytest.fixture
@@ -71,6 +71,19 @@ def test_init_fetches(fetcher, mocker):
assert len(ret) == len(fetch_requests)
+@pytest.mark.parametrize(("api_version", "fetch_version"), [
+ ((0, 10), 2),
+ ((0, 9), 1),
+ ((0, 8), 0)
+])
+def test_create_fetch_requests(fetcher, mocker, api_version, fetch_version):
+ fetcher._client.in_flight_request_count.return_value = 0
+ fetcher.config['api_version'] = api_version
+ by_node = fetcher._create_fetch_requests()
+ requests = by_node.values()
+ assert all([isinstance(r, FetchRequest[fetch_version]) for r in requests])
+
+
def test_update_fetch_positions(fetcher, mocker):
mocker.patch.object(fetcher, '_reset_offset')
partition = TopicPartition('foobar', 0)
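The one-line fixture change above swaps the mock spec from the KafkaClient class to a throwaway KafkaClient instance; with mock, an instance spec also covers attributes assigned in __init__, not just those reachable on the class. A standalone illustration of that mock behavior (using unittest.mock directly rather than the mocker fixture; the bootstrap_servers=[] instance is the same trick the fixture uses):

from unittest import mock
from kafka.client_async import KafkaClient

# Spec'ing against the class limits the mock to attributes defined on
# KafkaClient itself; spec'ing against an instance also picks up attributes
# created during __init__, so accessing a misspelled or missing attribute
# raises AttributeError instead of silently returning a new Mock.
class_spec_mock = mock.Mock(spec=KafkaClient)
instance_spec_mock = mock.Mock(spec=KafkaClient(bootstrap_servers=[]))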
diff --git a/test/test_sender.py b/test/test_sender.py
new file mode 100644
index 0000000..bb9068e
--- /dev/null
+++ b/test/test_sender.py
@@ -0,0 +1,47 @@
+# pylint: skip-file
+from __future__ import absolute_import
+
+import io
+
+import pytest
+
+from kafka.client_async import KafkaClient
+from kafka.cluster import ClusterMetadata
+from kafka.producer.buffer import MessageSetBuffer
+from kafka.producer.sender import Sender
+from kafka.producer.record_accumulator import RecordAccumulator, RecordBatch
+import kafka.errors as Errors
+from kafka.future import Future
+from kafka.protocol.produce import ProduceRequest
+from kafka.structs import TopicPartition, OffsetAndMetadata
+
+
+@pytest.fixture
+def client(mocker):
+ _cli = mocker.Mock(spec=KafkaClient(bootstrap_servers=[]))
+ _cli.cluster = mocker.Mock(spec=ClusterMetadata())
+ return _cli
+
+
+@pytest.fixture
+def accumulator():
+ return RecordAccumulator()
+
+
+@pytest.fixture
+def sender(client, accumulator):
+ return Sender(client, client.cluster, accumulator)
+
+
+@pytest.mark.parametrize(("api_version", "produce_version"), [
+ ((0, 10), 2),
+ ((0, 9), 1),
+ ((0, 8), 0)
+])
+def test_produce_request(sender, mocker, api_version, produce_version):
+ sender.config['api_version'] = api_version
+ tp = TopicPartition('foo', 0)
+ records = MessageSetBuffer(io.BytesIO(), 100000)
+ batch = RecordBatch(tp, records)
+ produce_request = sender._produce_request(0, 0, 0, [batch])
+ assert isinstance(produce_request, ProduceRequest[produce_version])