author    Carson Ip <carsonip715@gmail.com>    2019-10-25 16:41:07 +0800
committer Jeff Widman <jeff@jeffwidman.com>    2019-11-08 05:21:16 -0800
commit    3861e16ea4ef8d60bc6ffc51c0183da33c629642 (patch)
tree      e72741a5edf3eb2fe141fe2c37a2001f3797989b
parent    736218da447cc97624d1f0838f1a15fefc24bd24 (diff)
download  kafka-python-3861e16ea4ef8d60bc6ffc51c0183da33c629642.tar.gz
Fix typos
-rwxr-xr-x  benchmarks/consumer_performance.py   2
-rw-r--r--  kafka/admin/client.py                2
-rw-r--r--  kafka/client_async.py                2
-rw-r--r--  kafka/conn.py                        4
-rw-r--r--  kafka/consumer/fetcher.py            4
-rw-r--r--  kafka/coordinator/consumer.py        2
-rw-r--r--  kafka/producer/kafka.py              2
-rw-r--r--  kafka/record/util.py                 2
-rw-r--r--  test/record/test_records.py          2
9 files changed, 11 insertions, 11 deletions
diff --git a/benchmarks/consumer_performance.py b/benchmarks/consumer_performance.py
index d7580ce..9e3b6a9 100755
--- a/benchmarks/consumer_performance.py
+++ b/benchmarks/consumer_performance.py
@@ -157,7 +157,7 @@ def get_args_parser():
default=100)
parser.add_argument(
'--consumer-config', type=str, nargs='+', default=(),
- help='kafka consumer related configuaration properties like '
+ help='kafka consumer related configuration properties like '
'bootstrap_servers,client_id etc..')
parser.add_argument(
'--fixture-compression', type=str,
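The help text corrected above describes `--consumer-config`, which accepts consumer configuration properties such as `bootstrap_servers` and `client_id`. A minimal sketch of turning such `key=value` pairs into `KafkaConsumer` keyword arguments; the parsing format and property values here are illustrative, not necessarily what the benchmark script itself does:

```python
from kafka import KafkaConsumer

# Hypothetical CLI input, e.g.:
#   --consumer-config bootstrap_servers=localhost:9092 client_id=bench
raw_props = ['bootstrap_servers=localhost:9092', 'client_id=bench']

# Split each "key=value" pair into a kwargs dict for KafkaConsumer.
consumer_kwargs = dict(prop.split('=', 1) for prop in raw_props)
consumer = KafkaConsumer('my-topic', **consumer_kwargs)
```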
diff --git a/kafka/admin/client.py b/kafka/admin/client.py
index bb1e2b5..cc126c6 100644
--- a/kafka/admin/client.py
+++ b/kafka/admin/client.py
@@ -103,7 +103,7 @@ class KafkaAdminClient(object):
should verify that the certificate matches the broker's hostname.
Default: True.
ssl_cafile (str): Optional filename of CA file to use in certificate
- veriication. Default: None.
+ verification. Default: None.
ssl_certfile (str): Optional filename of file in PEM format containing
the client certificate, as well as any CA certificates needed to
establish the certificate's authenticity. Default: None.
diff --git a/kafka/client_async.py b/kafka/client_async.py
index 14677d0..87b3fe0 100644
--- a/kafka/client_async.py
+++ b/kafka/client_async.py
@@ -111,7 +111,7 @@ class KafkaClient(object):
should verify that the certificate matches the broker's hostname.
Default: True.
ssl_cafile (str): Optional filename of CA file to use in certificate
- veriication. Default: None.
+ verification. Default: None.
ssl_certfile (str): Optional filename of file in PEM format containing
the client certificate, as well as any CA certificates needed to
establish the certificate's authenticity. Default: None.
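The two docstrings fixed above (KafkaAdminClient and KafkaClient) describe the same TLS options. A minimal sketch of wiring them up, assuming the broker address and certificate paths are placeholders for your own:

```python
from kafka import KafkaAdminClient

admin = KafkaAdminClient(
    bootstrap_servers='broker.example.com:9093',  # placeholder broker
    security_protocol='SSL',
    ssl_cafile='/etc/ssl/certs/ca.pem',           # CA file used for certificate verification
    ssl_certfile='/etc/ssl/certs/client.pem',     # client certificate in PEM format
    ssl_keyfile='/etc/ssl/private/client.key',    # client private key
)
```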
diff --git a/kafka/conn.py b/kafka/conn.py
index 815065b..bc01078 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -251,7 +251,7 @@ class BrokerConnection(object):
self.config['send_buffer_bytes']))
assert self.config['security_protocol'] in self.SECURITY_PROTOCOLS, (
- 'security_protcol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))
+ 'security_protocol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
assert ssl_available, "Python wasn't built with SSL support"
@@ -1196,7 +1196,7 @@ class BrokerConnection(object):
# by looking at ApiVersionResponse
api_versions = self._handle_api_version_response(f.value)
version = self._infer_broker_version_from_api_versions(api_versions)
- log.info('Broker version identifed as %s', '.'.join(map(str, version)))
+ log.info('Broker version identified as %s', '.'.join(map(str, version)))
log.info('Set configuration api_version=%s to skip auto'
' check_version requests on startup', version)
break
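The second hunk touches the log line emitted after broker version probing; that message points at pinning `api_version` to skip the automatic check_version requests at startup. A minimal sketch, with the broker address and version tuple as placeholders:

```python
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    'my-topic',
    bootstrap_servers='localhost:9092',
    security_protocol='PLAINTEXT',  # must be one of the supported security protocols
    api_version=(2, 3),             # placeholder; match your broker to skip auto-detection
)
```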
diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index 5434c36..f9d96b0 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -255,7 +255,7 @@ class Fetcher(six.Iterator):
Arguments:
timestamps: {TopicPartition: int} dict with timestamps to fetch
offsets by. -1 for the latest available, -2 for the earliest
- available. Otherwise timestamp is treated as epoch miliseconds.
+ available. Otherwise timestamp is treated as epoch milliseconds.
Returns:
{TopicPartition: (int, int)}: Mapping of partition to
@@ -291,7 +291,7 @@ class Fetcher(six.Iterator):
self._client.poll(future=refresh_future, timeout_ms=remaining_ms)
# Issue #1780
- # Recheck partition existance after after a successful metadata refresh
+ # Recheck partition existence after after a successful metadata refresh
if refresh_future.succeeded() and isinstance(future.exception, Errors.StaleMetadata):
log.debug("Stale metadata was raised, and we now have an updated metadata. Rechecking partition existance")
unknown_partition = future.exception.args[0] # TopicPartition from StaleMetadata
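The docstring fixed above belongs to the fetcher's offset-by-timestamp lookup; the same epoch-millisecond semantics surface through the public `KafkaConsumer.offsets_for_times()` API. A minimal usage sketch, with the topic, partition, and broker address as placeholders:

```python
import time
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
tp = TopicPartition('my-topic', 0)  # placeholder topic/partition

# Find the first offset whose timestamp is >= one hour ago (epoch milliseconds).
one_hour_ago_ms = int(time.time() * 1000) - 3600 * 1000
offsets = consumer.offsets_for_times({tp: one_hour_ago_ms})
print(offsets[tp])  # OffsetAndTimestamp(...) or None if no such message exists
```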
diff --git a/kafka/coordinator/consumer.py b/kafka/coordinator/consumer.py
index 9b7a3cd..30337c3 100644
--- a/kafka/coordinator/consumer.py
+++ b/kafka/coordinator/consumer.py
@@ -69,7 +69,7 @@ class ConsumerCoordinator(BaseCoordinator):
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
session_timeout_ms (int): The timeout used to detect failures when
- using Kafka's group managementment facilities. Default: 30000
+ using Kafka's group management facilities. Default: 30000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
exclude_internal_topics (bool): Whether records from internal topics
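`session_timeout_ms` and the related rebalance settings described above are normally supplied through `KafkaConsumer` rather than by constructing the coordinator directly. A minimal sketch with placeholder values:

```python
from kafka import KafkaConsumer

# session_timeout_ms bounds how long the group coordinator waits for a
# heartbeat before evicting this member; keep heartbeat_interval_ms well below it.
consumer = KafkaConsumer(
    'my-topic',
    bootstrap_servers='localhost:9092',
    group_id='my-group',
    session_timeout_ms=30000,   # value shown as the coordinator default above
    heartbeat_interval_ms=3000,
)
```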
diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py
index 3ff1a09..67b9e19 100644
--- a/kafka/producer/kafka.py
+++ b/kafka/producer/kafka.py
@@ -155,7 +155,7 @@ class KafkaProducer(object):
'linger' for the specified time waiting for more records to show
up. This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
would have the effect of reducing the number of requests sent but
- would add up to 5ms of latency to records sent in the absense of
+ would add up to 5ms of latency to records sent in the absence of
load. Default: 0.
partitioner (callable): Callable used to determine which partition
each message is assigned to. Called (after key serialization):
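The `linger_ms` behavior described above trades a small, bounded delay for better batching. A minimal sketch, assuming a local broker:

```python
from kafka import KafkaProducer

# linger_ms=5 lets the producer wait up to 5 ms for more records to batch
# together, adding at most 5 ms of latency under light load.
producer = KafkaProducer(bootstrap_servers='localhost:9092', linger_ms=5)
producer.send('my-topic', b'payload')
producer.flush()  # block until outstanding batches are delivered
```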
diff --git a/kafka/record/util.py b/kafka/record/util.py
index 74b9a69..2f8286d 100644
--- a/kafka/record/util.py
+++ b/kafka/record/util.py
@@ -91,7 +91,7 @@ def decode_varint(buffer, pos=0):
on how those can be produced.
Arguments:
- buffer (bytearry): buffer to read from.
+ buffer (bytearray): buffer to read from.
pos (int): optional position to read from
Returns:
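The corrected docstring belongs to `decode_varint`, which reads a Kafka-style zig-zag varint starting at a position in a buffer. A self-contained sketch of that encoding, mirroring the buffer-plus-position interface suggested by the docstring; this is an illustration, not the module's actual implementation:

```python
def decode_varint_sketch(buffer, pos=0):
    """Illustrative zig-zag varint decoder (not kafka.record.util's own code).

    Reads 7 bits per byte, least-significant group first; the high bit of
    each byte flags continuation. The accumulated value is then zig-zag
    decoded so small negative numbers also stay small on the wire.
    """
    result = 0
    shift = 0
    while True:
        byte = buffer[pos]
        pos += 1
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            break
        shift += 7
    # Zig-zag decode: 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...
    return (result >> 1) ^ -(result & 1), pos


assert decode_varint_sketch(bytearray(b"\x96\x01")) == (75, 2)
```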
diff --git a/test/record/test_records.py b/test/record/test_records.py
index f1b8baa..9f72234 100644
--- a/test/record/test_records.py
+++ b/test/record/test_records.py
@@ -195,7 +195,7 @@ def test_memory_records_builder(magic, compression_type):
size_before_close = builder.size_in_bytes()
assert size_before_close == sum(msg_sizes) + base_size
- # Size should remain the same after closing. No traling bytes
+ # Size should remain the same after closing. No trailing bytes
builder.close()
assert builder.compression_rate() > 0
expected_size = size_before_close * builder.compression_rate()