summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeff Widman <jeff@jeffwidman.com>2018-12-13 15:24:50 -0800
committerJeff Widman <jeff@jeffwidman.com>2019-01-13 13:53:18 -0800
commite54f5a3677f8c0dd89bff7be9545db33c8762596 (patch)
treefbac310bd564e3faa212137ce58f6e0523fedb35
parentac5a935d0c8295fd66d7d3b86e266f05b09b4091 (diff)
downloadkafka-python-e54f5a3677f8c0dd89bff7be9545db33c8762596.tar.gz
Remove unused `skip_double_compressed_messages`
This `skip_double_compressed_messages` flag was added in https://github.com/dpkp/kafka-python/pull/755 in order to fix https://github.com/dpkp/kafka-python/issues/718. However, grep'ing through the code, it looks like this is no longer used anywhere and doesn't do anything. So removing it.
-rw-r--r--kafka/consumer/fetcher.py8
-rw-r--r--kafka/consumer/group.py8
2 files changed, 0 insertions, 16 deletions
diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index 3638831..c1eb03e 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -55,7 +55,6 @@ class Fetcher(six.Iterator):
'max_partition_fetch_bytes': 1048576,
'max_poll_records': sys.maxsize,
'check_crcs': True,
- 'skip_double_compressed_messages': False,
'iterator_refetch_records': 1, # undocumented -- interface may change
'metric_group_prefix': 'consumer',
'api_version': (0, 8, 0),
@@ -98,13 +97,6 @@ class Fetcher(six.Iterator):
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
- skip_double_compressed_messages (bool): A bug in KafkaProducer
- caused some messages to be corrupted via double-compression.
- By default, the fetcher will return the messages as a compressed
- blob of bytes with a single offset, i.e. how the message was
- actually published to the cluster. If you prefer to have the
- fetcher automatically detect corrupt messages and skip them,
- set this option to True. Default: False.
"""
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
diff --git a/kafka/consumer/group.py b/kafka/consumer/group.py
index 699b02b..8d2c65e 100644
--- a/kafka/consumer/group.py
+++ b/kafka/consumer/group.py
@@ -165,13 +165,6 @@ class KafkaConsumer(six.Iterator):
consumer_timeout_ms (int): number of milliseconds to block during
message iteration before raising StopIteration (i.e., ending the
iterator). Default block forever [float('inf')].
- skip_double_compressed_messages (bool): A bug in KafkaProducer <= 1.2.4
- caused some messages to be corrupted via double-compression.
- By default, the fetcher will return these messages as a compressed
- blob of bytes with a single offset, i.e. how the message was
- actually published to the cluster. If you prefer to have the
- fetcher automatically detect corrupt messages and skip them,
- set this option to True. Default: False.
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
@@ -279,7 +272,6 @@ class KafkaConsumer(six.Iterator):
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'consumer_timeout_ms': float('inf'),
- 'skip_double_compressed_messages': False,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,