author    Mark Roberts <wizzat@gmail.com>  2015-01-15 00:49:46 -0800
committer Mark Roberts <wizzat@gmail.com>  2015-01-15 00:49:46 -0800
commit    e6b37c00c5b0a52f821b35a4e26c0943f3e05b07 (patch)
tree      14b58764d5d711360d04606e10cec7dc6d6400af
parent    28a838516b3feb81c3c7f7fbd6aad2391a4139c5 (diff)
parent    21a5ca834b9e0831b29870b5d099263892f21a7d (diff)
Merge pull request #295 from dpkp/kafka_0_8_2
Kafka 0.8.2.0 updates
-rw-r--r--  kafka/client.py                                   7
-rw-r--r--  servers/0.8.2.0/resources/kafka.properties      118
-rw-r--r--  servers/0.8.2.0/resources/log4j.properties       24
-rw-r--r--  servers/0.8.2.0/resources/zookeeper.properties   21
-rw-r--r--  test/fixtures.py                                  6
-rw-r--r--  test/test_client_integration.py                   2
-rw-r--r--  test/test_consumer_integration.py                 4
7 files changed, 175 insertions(+), 7 deletions(-)
diff --git a/kafka/client.py b/kafka/client.py
index bc3d853..7a0cf18 100644
--- a/kafka/client.py
+++ b/kafka/client.py
@@ -11,7 +11,7 @@ from kafka.common import (TopicAndPartition, BrokerMetadata,
                           ConnectionError, FailedPayloadsError,
                           KafkaTimeoutError, KafkaUnavailableError,
                           LeaderNotAvailableError, UnknownTopicOrPartitionError,
-                          NotLeaderForPartitionError)
+                          NotLeaderForPartitionError, ReplicaNotAvailableError)
 from kafka.conn import collect_hosts, KafkaConnection, DEFAULT_SOCKET_TIMEOUT_SECONDS
 from kafka.protocol import KafkaProtocol
@@ -350,6 +350,11 @@ class KafkaClient(object):
                 log.error('No leader for topic %s partition %d', topic, partition)
                 self.topics_to_brokers[topic_part] = None
                 continue
+            # If one of the replicas is unavailable -- ignore
+            # this error code is provided for admin purposes only
+            # we never talk to replicas, only the leader
+            except ReplicaNotAvailableError:
+                log.warning('Some (non-leader) replicas not available for topic %s partition %d', topic, partition)

             # If Known Broker, topic_partition -> BrokerMetadata
             if leader in self.brokers:
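
The effect of the new clause above: metadata loading now tolerates partitions whose non-leader replicas are down. A minimal self-contained sketch of the pattern; only LeaderNotAvailableError and ReplicaNotAvailableError correspond to real kafka.common exceptions, while the surrounding loop and both callables are invented for illustration:

    import logging

    log = logging.getLogger(__name__)

    class LeaderNotAvailableError(Exception): pass
    class ReplicaNotAvailableError(Exception): pass

    def resolve_partitions(partitions, check_errors, lookup_leader):
        # Hypothetical helpers: check_errors raises the exception decoded
        # from a partition's metadata error code, and lookup_leader returns
        # the leader broker for the partition.
        leaders = {}
        for topic, partition in partitions:
            try:
                check_errors(topic, partition)
            except LeaderNotAvailableError:
                # No leader at all: record the gap and move on.
                log.error('No leader for topic %s partition %d',
                          topic, partition)
                leaders[(topic, partition)] = None
                continue
            except ReplicaNotAvailableError:
                # Harmless to a client that only talks to the leader:
                # log the missing follower replica and fall through.
                log.warning('Some (non-leader) replicas not available for '
                            'topic %s partition %d', topic, partition)
            leaders[(topic, partition)] = lookup_leader(topic, partition)
        return leaders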
diff --git a/servers/0.8.2.0/resources/kafka.properties b/servers/0.8.2.0/resources/kafka.properties
new file mode 100644
index 0000000..a638f39
--- /dev/null
+++ b/servers/0.8.2.0/resources/kafka.properties
@@ -0,0 +1,118 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={broker_id}
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+port={port}
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+host.name={host}
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured. Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs={tmp_dir}/data
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions={partitions}
+default.replication.factor={replicas}
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=536870912
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={zk_host}:{zk_port}/{zk_chroot}
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=1000000
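
Note the {broker_id}-style tokens throughout this file (and in the log4j and zookeeper templates below): they are placeholders the test fixtures fill in at startup. A sketch of that rendering, assuming plain str.format substitution over the template text; the real logic lives in test/fixtures.py and may differ in detail, and all paths and values here are examples:

    import os

    with open('servers/0.8.2.0/resources/kafka.properties') as f:
        template = f.read()

    rendered = template.format(
        broker_id=0,
        port=9092,
        host='127.0.0.1',
        tmp_dir='/tmp/kafka-fixture-0',
        partitions=2,
        replicas=1,
        zk_host='127.0.0.1',
        zk_port=2181,
        zk_chroot='kafka-python',
    )

    os.makedirs('/tmp/kafka-fixture-0')  # assumes a fresh directory
    with open('/tmp/kafka-fixture-0/kafka.properties', 'w') as f:
        f.write(rendered)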
diff --git a/servers/0.8.2.0/resources/log4j.properties b/servers/0.8.2.0/resources/log4j.properties
new file mode 100644
index 0000000..f863b3b
--- /dev/null
+++ b/servers/0.8.2.0/resources/log4j.properties
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=INFO, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.logger.kafka=DEBUG, stdout
+log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, stdout
+log4j.logger.org.apache.zookeeper=INFO, stdout
diff --git a/servers/0.8.2.0/resources/zookeeper.properties b/servers/0.8.2.0/resources/zookeeper.properties
new file mode 100644
index 0000000..e3fd097
--- /dev/null
+++ b/servers/0.8.2.0/resources/zookeeper.properties
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir={tmp_dir}
+# the port at which the clients will connect
+clientPort={port}
+clientPortAddress={host}
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
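
Once rendered, the fixture starts ZooKeeper against this file. A hedged sketch using the server-start script shipped in a Kafka distribution; the install path and the rendered config location are assumptions for illustration:

    import subprocess

    # Both paths below are assumptions for the example.
    zk = subprocess.Popen(
        ['/opt/kafka_2.10-0.8.2.0/bin/zookeeper-server-start.sh',
         '/tmp/kafka-fixture-0/zookeeper.properties'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

The combined stdout/stderr pipe is what a wait_for-style readiness check (see the fixtures diff below) would poll.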
diff --git a/test/fixtures.py b/test/fixtures.py
index b286619..3c496fd 100644
--- a/test/fixtures.py
+++ b/test/fixtures.py
@@ -123,7 +123,7 @@ class ZookeeperFixture(Fixture):
         # Party!
         self.out("Starting...")
         self.child.start()
-        self.child.wait_for(r"Snapshotting")
+        self.child.wait_for(r"binding to port")
         self.out("Done!")

     def close(self):
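
About the hunk above: the readiness probe now waits for "binding to port", which ZooKeeper logs once its client socket is actually listening, rather than "Snapshotting", which is presumably not emitted reliably by the ZooKeeper version bundled with 0.8.2.0. A sketch of what a wait_for-style helper does conceptually; the real one lives in the project's test utilities and also enforces a timeout:

    import re

    def wait_for(stream, pattern):
        # Block until a line of the child's output matches `pattern`.
        regex = re.compile(pattern)
        for line in iter(stream.readline, b''):
            if regex.search(line.decode('utf-8', 'replace')):
                return True
        return False  # stream closed before the pattern appeared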
@@ -212,8 +212,8 @@ class KafkaFixture(Fixture):
         if proc.wait() != 0:
             self.out("Failed to create Zookeeper chroot node")
-            self.out(proc.stdout)
-            self.out(proc.stderr)
+            self.out(proc.stdout.read())
+            self.out(proc.stderr.read())
             raise RuntimeError("Failed to create Zookeeper chroot node")

         self.out("Done!")
diff --git a/test/test_client_integration.py b/test/test_client_integration.py
index cc60778..c0331ea 100644
--- a/test/test_client_integration.py
+++ b/test/test_client_integration.py
@@ -54,7 +54,7 @@ class TestKafkaClientIntegration(KafkaIntegrationTestCase):
     #   Offset Tests   #
     ####################

-    @kafka_versions("0.8.1", "0.8.1.1")
+    @kafka_versions("0.8.1", "0.8.1.1", "0.8.2.0")
     def test_commit_fetch_offsets(self):
         req = OffsetCommitRequest(self.topic, 0, 42, b"metadata")
         (resp,) = self.client.send_offset_commit_request(b"group", [req])
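
@kafka_versions gates each test on the broker version under test; the merge simply adds 0.8.2.0 to the allow-lists here and in the two consumer tests below. An illustrative reimplementation of such a decorator, assuming the version is exposed via an environment variable (kafka-python's harness uses KAFKA_VERSION, but treat the details as a sketch):

    import functools
    import os
    import unittest

    def kafka_versions(*versions):
        # Skip the test unless the broker version under test is allowed.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(self, *args, **kwargs):
                version = os.environ.get('KAFKA_VERSION', '')
                if version not in versions:
                    raise unittest.SkipTest(
                        'requires broker version in %r, got %r'
                        % (versions, version))
                return func(self, *args, **kwargs)
            return wrapper
        return decorator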
diff --git a/test/test_consumer_integration.py b/test/test_consumer_integration.py
index ea32318..4723220 100644
--- a/test/test_consumer_integration.py
+++ b/test/test_consumer_integration.py
@@ -257,7 +257,7 @@ class TestConsumerIntegration(KafkaIntegrationTestCase):
         big_consumer.stop()

-    @kafka_versions("0.8.1", "0.8.1.1")
+    @kafka_versions("0.8.1", "0.8.1.1", "0.8.2.0")
     def test_offset_behavior__resuming_behavior(self):
         self.send_messages(0, range(0, 100))
         self.send_messages(1, range(100, 200))
@@ -357,7 +357,7 @@ class TestConsumerIntegration(KafkaIntegrationTestCase):
         self.assertEqual(len(messages), 5)
         self.assertGreaterEqual(t.interval, TIMEOUT_MS / 1000.0 )

-    @kafka_versions("0.8.1", "0.8.1.1")
+    @kafka_versions("0.8.1", "0.8.1.1", "0.8.2.0")
     def test_kafka_consumer__offset_commit_resume(self):
         GROUP_ID = random_string(10)