author     Rafael H. Schloming <rhs@apache.org>  2009-12-26 12:42:57 +0000
committer  Rafael H. Schloming <rhs@apache.org>  2009-12-26 12:42:57 +0000
commit     248f1fe188fe2307b9dcf2c87a83b653eaa1920c (patch)
tree       d5d0959a70218946ff72e107a6c106e32479a398 /python
parent     3c83a0e3ec7cf4dc23e83a340b25f5fc1676f937 (diff)
download   qpid-python-248f1fe188fe2307b9dcf2c87a83b653eaa1920c.tar.gz
synchronized with trunk except for ruby dir
git-svn-id: https://svn.apache.org/repos/asf/qpid/branches/qpid.rnr@893970 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'python')
-rw-r--r-- [-rwxr-xr-x]  python/LICENSE.txt | 0
-rw-r--r--  python/Makefile | 98
-rw-r--r--  python/README.txt | 58
-rw-r--r--  python/RELEASE_NOTES | 32
-rwxr-xr-x  python/amqp-doc | 80
-rwxr-xr-x  python/commands/qpid-cluster | 328
-rwxr-xr-x  python/commands/qpid-config | 474
-rwxr-xr-x  python/commands/qpid-printevents | 74
-rwxr-xr-x  python/commands/qpid-queue-stats | 203
-rwxr-xr-x  python/commands/qpid-route | 593
-rwxr-xr-x  python/commands/qpid-stat | 460
-rwxr-xr-x  python/commands/qpid-tool | 6
-rw-r--r--  python/cpp_failing_0-10.txt | 0
-rw-r--r--  python/cpp_failing_0-8.txt | 0
-rw-r--r--  python/cpp_failing_0-9.txt | 4
-rw-r--r--  python/doc/test-requirements.txt | 19
-rw-r--r--  python/examples/README | 319
-rwxr-xr-x  python/examples/api/drain | 62
-rwxr-xr-x  python/examples/api/server | 87
-rwxr-xr-x  python/examples/api/spout | 103
-rwxr-xr-x  python/examples/datatypes/client.py | 122
-rwxr-xr-x  python/examples/datatypes/server.py | 124
-rw-r--r--  python/examples/datatypes/testdata.py | 180
-rwxr-xr-x  python/examples/direct/declare_queues.py | 20
-rwxr-xr-x  python/examples/direct/direct_consumer.py | 20
-rwxr-xr-x  python/examples/direct/direct_producer.py | 20
-rwxr-xr-x  python/examples/direct/listener.py | 20
-rw-r--r--  python/examples/direct/verify | 19
-rwxr-xr-x  python/examples/fanout/fanout_consumer.py | 20
-rwxr-xr-x  python/examples/fanout/fanout_producer.py | 20
-rwxr-xr-x  python/examples/fanout/listener.py | 20
-rw-r--r--  python/examples/fanout/verify | 19
-rwxr-xr-x  python/examples/headers/declare_queues.py | 77
-rwxr-xr-x  python/examples/headers/headers_consumer.py | 107
-rwxr-xr-x  python/examples/headers/headers_producer.py | 79
-rw-r--r--  python/examples/headers/verify (renamed from python/tests_0-9/execution.py) | 13
-rw-r--r--  python/examples/headers/verify.in | 25
-rwxr-xr-x  python/examples/pubsub/topic_publisher.py | 18
-rwxr-xr-x  python/examples/pubsub/topic_subscriber.py | 20
-rw-r--r--  python/examples/pubsub/verify | 19
-rwxr-xr-x  python/examples/request-response/client.py | 20
-rwxr-xr-x  python/examples/request-response/server.py | 20
-rw-r--r--  python/examples/request-response/verify | 19
-rwxr-xr-x  python/examples/xml-exchange/declare_queues.py | 20
-rwxr-xr-x  python/examples/xml-exchange/listener.py | 20
-rw-r--r--  python/examples/xml-exchange/verify | 19
-rwxr-xr-x  python/examples/xml-exchange/xml_consumer.py | 20
-rwxr-xr-x  python/examples/xml-exchange/xml_producer.py | 20
-rwxr-xr-x  python/hello-world | 31
-rw-r--r--  python/java_failing_0-8.txt | 2
-rw-r--r--  python/java_failing_0-9.txt | 18
-rw-r--r--  python/mllib/__init__.py | 30
-rw-r--r--  python/mllib/dom.py | 15
-rw-r--r--  python/models/fedsim/__init__.py | 19
-rw-r--r--  python/models/fedsim/fedsim.py | 434
-rw-r--r--  python/models/fedsim/testBig.py | 88
-rw-r--r--  python/models/fedsim/testRing.py | 48
-rw-r--r--  python/models/fedsim/testStar.py | 65
-rw-r--r--  python/models/fedsim/testStarAdd.py | 56
-rwxr-xr-x  python/pal2py | 274
-rwxr-xr-x  python/perftest | 95
-rwxr-xr-x  python/preppy | 67
-rw-r--r--  python/qmf/__init__.py | 18
-rw-r--r--  python/qmf/console.py | 1970
-rwxr-xr-x  python/qpid-python-test | 575
-rw-r--r--  python/qpid/address.py | 161
-rw-r--r--  python/qpid/assembler.py | 118
-rw-r--r--  python/qpid/brokertest.py | 480
-rw-r--r--  python/qpid/client.py | 7
-rw-r--r--  python/qpid/codec010.py | 255
-rw-r--r--  python/qpid/compat.py | 94
-rw-r--r--  python/qpid/concurrency.py | 100
-rw-r--r--  python/qpid/connection.py | 96
-rw-r--r--  python/qpid/connection08.py | 21
-rw-r--r--  python/qpid/datatypes.py | 107
-rw-r--r--  python/qpid/debug.py | 55
-rw-r--r--  python/qpid/delegates.py | 128
-rw-r--r--  python/qpid/disp.py | 171
-rw-r--r--  python/qpid/driver.py | 859
-rw-r--r--  python/qpid/exceptions.py | 1
-rw-r--r--  python/qpid/framer.py | 107
-rw-r--r--  python/qpid/framing.py | 310
-rw-r--r--  python/qpid/generator.py | 56
-rw-r--r--  python/qpid/harness.py | 20
-rw-r--r--  python/qpid/invoker.py | 48
-rw-r--r--  python/qpid/lexer.py | 112
-rw-r--r--  python/qpid/management.py | 300
-rw-r--r--  python/qpid/managementdata.py | 170
-rw-r--r--  python/qpid/message.py | 1
-rw-r--r--  python/qpid/messaging.py | 822
-rw-r--r--  python/qpid/mimetype.py | 106
-rw-r--r--  python/qpid/ops.py | 280
-rw-r--r--  python/qpid/parser.py | 68
-rw-r--r--  python/qpid/peer.py | 12
-rw-r--r--  python/qpid/queue.py | 4
-rw-r--r--  python/qpid/selector.py | 139
-rw-r--r--  python/qpid/session.py | 227
-rw-r--r--  python/qpid/spec.py | 6
-rw-r--r--  python/qpid/spec010.py | 691
-rw-r--r--  python/qpid/testlib.py | 300
-rw-r--r-- [-rwxr-xr-x]  python/qpid/tests/__init__.py (renamed from python/run-tests) | 19
-rw-r--r--  python/qpid/tests/address.py | 199
-rw-r--r--  python/qpid/tests/framing.py | 289
-rw-r--r--  python/qpid/tests/messaging.py | 929
-rw-r--r--  python/qpid/tests/mimetype.py | 56
-rw-r--r--  python/qpid/tests/parser.py | 37
-rw-r--r--  python/qpid/util.py | 70
-rw-r--r--  python/qpid_config.py | 6
-rwxr-xr-x  python/rule2test | 108
-rwxr-xr-x  python/server | 18
-rwxr-xr-x  python/server010 | 18
-rw-r--r--  python/setup.py | 4
-rw-r--r--  python/tests/__init__.py | 10
-rw-r--r--  python/tests/assembler.py | 77
-rw-r--r--  python/tests/codec.py | 14
-rw-r--r--  python/tests/codec010.py | 79
-rw-r--r--  python/tests/connection.py | 44
-rw-r--r--  python/tests/datatypes.py | 95
-rw-r--r--  python/tests/framer.py | 94
-rw-r--r--  python/tests/spec.py | 56
-rw-r--r--  python/tests/spec010.py | 70
-rw-r--r--  python/tests_0-10/__init__.py | 1
-rw-r--r--  python/tests_0-10/alternate_exchange.py | 68
-rw-r--r--  python/tests_0-10/broker.py | 16
-rw-r--r--  python/tests_0-10/dtx.py | 6
-rw-r--r--  python/tests_0-10/example.py | 4
-rw-r--r--  python/tests_0-10/exchange.py | 47
-rw-r--r--  python/tests_0-10/management.py | 339
-rw-r--r--  python/tests_0-10/message.py | 171
-rw-r--r--  python/tests_0-10/persistence.py | 5
-rw-r--r--  python/tests_0-10/query.py | 16
-rw-r--r--  python/tests_0-10/queue.py | 34
-rw-r--r--  python/tests_0-10/tx.py | 10
-rw-r--r--  python/tests_0-8/__init__.py | 2
-rw-r--r--  python/tests_0-8/basic.py | 7
-rw-r--r--  python/tests_0-8/broker.py | 24
-rw-r--r--  python/tests_0-8/example.py | 2
-rw-r--r--  python/tests_0-8/queue.py | 2
-rw-r--r--  python/tests_0-8/testlib.py | 2
-rw-r--r--  python/tests_0-8/tx.py | 2
-rw-r--r--  python/tests_0-9/__init__.py | 2
-rw-r--r--  python/tests_0-9/basic.py | 396
-rw-r--r--  python/tests_0-9/broker.py | 133
-rw-r--r--  python/tests_0-9/dtx.py | 587
-rw-r--r--  python/tests_0-9/example.py | 94
-rw-r--r--  python/tests_0-9/exchange.py | 327
-rw-r--r--  python/tests_0-9/message.py | 657
-rw-r--r--  python/tests_0-9/query.py | 2
-rw-r--r--  python/tests_0-9/queue.py | 261
-rw-r--r--  python/tests_0-9/testlib.py | 66
-rw-r--r--  python/tests_0-9/tx.py | 188
-rw-r--r--  python/todo.txt | 188
152 files changed, 14343 insertions, 5965 deletions
diff --git a/python/LICENSE.txt b/python/LICENSE.txt
index 6b0b1270ff..6b0b1270ff 100755..100644
--- a/python/LICENSE.txt
+++ b/python/LICENSE.txt
diff --git a/python/Makefile b/python/Makefile
new file mode 100644
index 0000000000..7f475adc09
--- /dev/null
+++ b/python/Makefile
@@ -0,0 +1,98 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+PREFIX=/usr/local
+EXEC_PREFIX=$(PREFIX)/bin
+DATA_DIR=$(PREFIX)/share
+
+PYTHON_LIB=$(shell python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(prefix='$(PREFIX)')")
+PYTHON_VERSION=$(shell python -c "from distutils.sysconfig import get_python_version; print get_python_version()")
+
+ddfirst=$(shell ddir=$(DATA_DIR) && echo $${ddir:0:1})
+ifeq ($(ddfirst),/)
+AMQP_SPEC_DIR=$(DATA_DIR)/amqp
+else
+AMQP_SPEC_DIR=$(PWD)/$(DATA_DIR)/amqp
+endif
+
+DIRS=qmf qpid mllib models examples tests tests_0-8 tests_0-9 tests_0-10
+SRCS=$(shell find $(DIRS) -name "*.py") qpid_config.py
+BUILD=build
+TARGETS=$(SRCS:%.py=$(BUILD)/%.py)
+
+PYCC=python -O -c "import compileall; compileall.main()"
+
+all: build
+
+$(BUILD)/%.py: %.py
+ @mkdir -p $(shell dirname $@)
+ ./preppy $(PYTHON_VERSION) < $< > $@
+
+build: $(TARGETS)
+
+.PHONY: doc
+
+doc:
+ @mkdir -p $(BUILD)
+ PYTHONPATH=. epydoc qpid.messaging -o $(BUILD)/doc --no-private --no-sourcecode --include-log
+
+install: build
+ install -d $(PYTHON_LIB)
+
+ install -d $(PYTHON_LIB)/mllib
+ install -pm 0644 LICENSE.txt NOTICE.txt $(BUILD)/mllib/*.* $(PYTHON_LIB)/mllib
+ $(PYCC) $(PYTHON_LIB)/mllib
+
+ install -d $(PYTHON_LIB)/qpid
+ install -pm 0644 LICENSE.txt NOTICE.txt README.txt $(BUILD)/qpid/*.* $(PYTHON_LIB)/qpid
+ TDIR=$(shell mktemp -d) && \
+ sed s@AMQP_SPEC_DIR=.*@AMQP_SPEC_DIR='"$(AMQP_SPEC_DIR)"'@ \
+ $(BUILD)/qpid_config.py > $${TDIR}/qpid_config.py && \
+ install -pm 0644 $${TDIR}/qpid_config.py $(PYTHON_LIB) && \
+ rm -rf $${TDIR}
+
+ install -d $(PYTHON_LIB)/qpid/tests
+ install -pm 0644 $(BUILD)/qpid/tests/*.* $(PYTHON_LIB)/qpid/tests
+ $(PYCC) $(PYTHON_LIB)/qpid
+
+ install -d $(PYTHON_LIB)/qmf
+ install -pm 0644 LICENSE.txt NOTICE.txt qmf/*.* $(PYTHON_LIB)/qmf
+ $(PYCC) $(PYTHON_LIB)/qmf
+
+ install -d $(PYTHON_LIB)/tests
+ install -pm 0644 $(BUILD)/tests/*.* $(PYTHON_LIB)/tests
+ $(PYCC) $(PYTHON_LIB)/tests
+
+ install -d $(PYTHON_LIB)/tests_0-8
+ install -pm 0644 $(BUILD)/tests_0-8/*.* $(PYTHON_LIB)/tests_0-8
+ $(PYCC) $(PYTHON_LIB)/tests_0-8
+
+ install -d $(PYTHON_LIB)/tests_0-9
+ install -pm 0644 $(BUILD)/tests_0-9/*.* $(PYTHON_LIB)/tests_0-9
+ $(PYCC) $(PYTHON_LIB)/tests_0-9
+
+ install -d $(PYTHON_LIB)/tests_0-10
+ install -pm 0644 $(BUILD)/tests_0-10/*.* $(PYTHON_LIB)/tests_0-10
+ $(PYCC) $(PYTHON_LIB)/tests_0-10
+
+ install -d $(EXEC_PREFIX)
+ install -pm 0755 qpid-python-test commands/* $(EXEC_PREFIX)
+
+clean:
+ rm -rf $(BUILD)
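The two $(shell python -c ...) lines near the top of this Makefile compute the install locations from distutils. For readers skimming the diff, a minimal standalone sketch of what they evaluate (Python 2, matching the print statements used throughout this tree; the /usr/local prefix mirrors the Makefile's default PREFIX):

    # Sketch: what the Makefile's PYTHON_LIB and PYTHON_VERSION variables resolve to.
    from distutils.sysconfig import get_python_lib, get_python_version

    print get_python_lib(prefix="/usr/local")   # e.g. /usr/local/lib/python2.4/site-packages
    print get_python_version()                  # e.g. 2.4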
diff --git a/python/README.txt b/python/README.txt
index e7bb5af408..772271cffe 100644
--- a/python/README.txt
+++ b/python/README.txt
@@ -1,32 +1,50 @@
-= RUNNING THE PYTHON TESTS =
+= INSTALLATION =
-The tests/ directory contains a collection of python unit tests to
-exercise functions of a broker.
+Extract the release archive into a directory of your choice and set
+your PYTHONPATH accordingly:
-Simplest way to run the tests:
+ tar -xzf qpid-python-<version>.tar.gz -C <install-prefix>
+ export PYTHONPATH=<install-prefix>/qpid-<version>/python
- * Run a broker on the default port
+= GETTING STARTED =
- * ./run-tests
+The python client includes a simple hello-world example that publishes
+and consumes a message:
-For additional options: ./run-tests --help
+ cp <install-prefix>/qpid-<version>/python/hello-world .
+ ./hello-world
+= EXAMPLES =
-== Expected failures ==
+More comprehensive examples can be found here:
-Until we complete functionality, tests may fail because the tested
-functionality is missing in the broker. To skip expected failures
-in the C++ or Java brokers:
+ cd <install-prefix>/qpid-<version>/python/examples
- ./run-tests -I <file_name>
+= RUNNING THE TESTS =
-=== File List ===
+The "tests" directory contains a collection of unit tests for the
+python client. The "tests_0-10", "tests_0-9", and "tests_0-8"
+directories contain protocol level conformance tests for AMQP brokers
+of the specified version.
-1. cpp_failing_0-10.txt
-2. cpp_failing_0-9.txt
-3. cpp_failing_0-8.txt
-4. java_failing_0-9.txt
-5. java_failing_0-8.txt
-6. cpp_failing_0-10_preview.txt -- will be depricated soon.
+The qpid-python-test script may be used to run these tests. It will by
+default run the python unit tests and the 0-10 conformance tests:
-If you fix a failure, please remove it from the corresponding list.
+ 1. Run a broker on the default port
+
+ 2. ./qpid-python-test
+
+If you wish to run the 0-8 or 0-9 conformance tests, they may be
+selected as follows:
+
+ 1. Run a broker on the default port
+
+ 2. ./qpid-python-test tests_0-8.*
+
+ -- or --
+
+ ./qpid-python-test tests_0-9.*
+
+See the qpid-python-test usage for additional options:
+
+ ./qpid-python-test -h
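After setting PYTHONPATH as described in the INSTALLATION section above, the packages shipped in this tree (qpid, qmf, and mllib, per the diffstat) should be importable; a minimal sanity-check sketch:

    # Sketch: verify PYTHONPATH points at the extracted python/ directory.
    import qpid
    import qmf.console
    import mllib

    print "qpid client found at:", qpid.__file__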
diff --git a/python/RELEASE_NOTES b/python/RELEASE_NOTES
index 7005aa83cb..c0903df38e 100644
--- a/python/RELEASE_NOTES
+++ b/python/RELEASE_NOTES
@@ -1,25 +1,17 @@
-Apache Incubator Qpid Python M2 Release Notes
--------------------------------------------
+Apache Python M4 Release Notes
+------------------------------
-The Qpid M2 release contains support the for AMQP 0-8 specification.
-You can access the 0-8 specification using the following link.
-http://www.amqp.org/tikiwiki/tiki-index.php?page=Download
-
-For full details of Qpid capabilities, as they currently stand, see our
-detailed project documentation at:
-
-http://cwiki.apache.org/confluence/pages/viewpage.action?pageId=28284
-
-Please take time to go through the README file provided with the distro.
+The Qpid M4 release of the python client contains support for both the
+0-8 and 0-10 versions of the AMQP specification as well as support for
+the non-WIP portion of the 0-9 specification. You can access these
+specifications from:
+http://jira.amqp.org/confluence/display/AMQP/Download
-Known Issues/Outstanding Work
------------------------------
-
-There are no known issues for the Phyton client.
-
+For full details of Qpid capabilities, as they currently stand, see our
+project page at:
-M2 Tasks Completed
--------------------
+http://cwiki.apache.org/confluence/display/qpid/Index
-Bug QPID-467 Complete Interop Testing
+The README file provided contains some details on installing and using
+the python client that is included with this distribution.
diff --git a/python/amqp-doc b/python/amqp-doc
deleted file mode 100755
index 1f5910f942..0000000000
--- a/python/amqp-doc
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-import sys, re
-from qpid.spec import load, pythonize
-from getopt import gnu_getopt as getopt, GetoptError
-from fnmatch import fnmatchcase as fnmatch
-
-def die(msg):
- print >> sys.stderr, msg
- sys.exit(1)
-
-def usage(msg = ""):
- return ("""%s
-
-Usage %s [<options>] [<pattern_1> ... <pattern_n>]
-
-Options:
- -e, --regexp use regex instead of glob when matching
- -s, --spec <url> location of amqp.xml
-""" % (msg, sys.argv[0])).strip()
-
-try:
- opts, args = getopt(sys.argv[1:], "s:ea:", ["regexp", "spec=", "additional="])
-except GetoptError, e:
- die(str(e))
-
-regexp = False
-spec = "../specs/amqp.0-9.xml"
-errata = []
-for k, v in opts:
- if k == "-e" or k == "--regexp": regexp = True
- if k == "-s" or k == "--spec": spec = v
- if k == "-a" or k == "--additional": errata.append(v)
-
-if regexp:
- def match(pattern, value):
- try:
- return re.match(pattern, value)
- except Exception, e:
- die("error: '%s': %s" % (pattern, e))
-else:
- def match(pattern, value):
- return fnmatch(value, pattern)
-
-spec = load(spec, *errata)
-methods = {}
-patterns = args
-for pattern in patterns:
- for c in spec.classes:
- for m in c.methods:
- name = pythonize("%s_%s" % (c.name, m.name))
- if match(pattern, name):
- methods[name] = m.define_method(name)
-
-if patterns:
- if methods:
- AMQP = type("AMQP[%s]" % ", ".join(patterns), (), methods)
- else:
- die("no matches")
-else:
- AMQP = spec.define_class("AMQP")
-
-help(AMQP)
diff --git a/python/commands/qpid-cluster b/python/commands/qpid-cluster
new file mode 100755
index 0000000000..7afb7671b8
--- /dev/null
+++ b/python/commands/qpid-cluster
@@ -0,0 +1,328 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import getopt
+import sys
+import locale
+import socket
+import re
+from qmf.console import Session
+
+class Config:
+ def __init__(self):
+ self._host = "localhost"
+ self._connTimeout = 10
+ self._stopId = None
+ self._stopAll = False
+ self._force = False
+ self._numeric = False
+ self._showConn = False
+ self._delConn = None
+
+def usage ():
+ print "Usage: qpid-cluster [OPTIONS] [broker-addr]"
+ print
+ print " broker-addr is in the form: [username/password@] hostname | ip-address [:<port>]"
+ print " ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost"
+ print
+ print "Options:"
+ print " --timeout seconds (10) Maximum time to wait for broker connection"
+ print " -C [--all-connections] View client connections to all cluster members"
+ print " -c [--connections] ID View client connections to specified member"
+ print " -d [--del-connection] HOST:PORT"
+ print " Disconnect a client connection"
+ print " -s [--stop] ID Stop one member of the cluster by its ID"
+ print " -k [--all-stop] Shut down the whole cluster"
+ print " -f [--force] Suppress the 'are-you-sure?' prompt"
+ print " -n [--numeric] Don't resolve names"
+ print
+
+class IpAddr:
+ def __init__(self, text):
+ if text.find("@") != -1:
+ tokens = text.split("@")
+ text = tokens[1]
+ if text.find(":") != -1:
+ tokens = text.split(":")
+ text = tokens[0]
+ self.port = int(tokens[1])
+ else:
+ self.port = 5672
+ self.dottedQuad = socket.gethostbyname(text)
+ nums = self.dottedQuad.split(".")
+ self.addr = (int(nums[0]) << 24) + (int(nums[1]) << 16) + (int(nums[2]) << 8) + int(nums[3])
+
+ def bestAddr(self, addrPortList):
+ bestDiff = 0xFFFFFFFFL
+ bestAddr = None
+ for addrPort in addrPortList:
+ diff = IpAddr(addrPort[0]).addr ^ self.addr
+ if diff < bestDiff:
+ bestDiff = diff
+ bestAddr = addrPort
+ return bestAddr
+
+class BrokerManager:
+ def __init__(self, config):
+ self.config = config
+ self.brokerName = None
+ self.qmf = None
+ self.broker = None
+
+ def SetBroker(self, brokerUrl):
+ self.url = brokerUrl
+ self.qmf = Session()
+ self.broker = self.qmf.addBroker(brokerUrl, self.config._connTimeout)
+ agents = self.qmf.getAgents()
+ for a in agents:
+ if a.getAgentBank() == 0:
+ self.brokerAgent = a
+
+ def Disconnect(self):
+ if self.broker:
+ self.qmf.delBroker(self.broker)
+
+ def _getClusters(self):
+ packages = self.qmf.getPackages()
+ if "org.apache.qpid.cluster" not in packages:
+ raise Exception("Clustering is not installed on the broker.")
+
+ clusters = self.qmf.getObjects(_class="cluster", _agent=self.brokerAgent)
+ if len(clusters) == 0:
+ raise Exception("Clustering is installed but not enabled on the broker.")
+
+ return clusters
+
+ def _getHostList(self, urlList):
+ hosts = []
+ hostAddr = IpAddr(self.config._host)
+ for url in urlList:
+ if url.find("amqp:") != 0:
+ raise Exception("Invalid URL 1")
+ url = url[5:]
+ addrs = str(url).split(",")
+ addrList = []
+ for addr in addrs:
+ tokens = addr.split(":")
+ if len(tokens) != 3:
+ raise Exception("Invalid URL 2")
+ addrList.append((tokens[1], tokens[2]))
+
+ # Find the address in the list that is most likely to be in the same subnet as the address
+ # with which we made the original QMF connection. This increases the probability that we will
+ # be able to reach the cluster member.
+
+ best = hostAddr.bestAddr(addrList)
+ bestUrl = best[0] + ":" + best[1]
+ hosts.append(bestUrl)
+ return hosts
+
+ def overview(self):
+ clusters = self._getClusters()
+ cluster = clusters[0]
+ memberList = cluster.members.split(";")
+ idList = cluster.memberIDs.split(";")
+
+ print " Cluster Name: %s" % cluster.clusterName
+ print "Cluster Status: %s" % cluster.status
+ print " Cluster Size: %d" % cluster.clusterSize
+ print " Members: ID=%s URL=%s" % (idList[0], memberList[0])
+ for idx in range(1,len(idList)):
+ print " : ID=%s URL=%s" % (idList[idx], memberList[idx])
+
+ def stopMember(self, id):
+ clusters = self._getClusters()
+ cluster = clusters[0]
+ idList = cluster.memberIDs.split(";")
+ if id not in idList:
+ raise Exception("No member with matching ID found")
+
+ if not self.config._force:
+ prompt = "Warning: "
+ if len(idList) == 1:
+ prompt += "This command will shut down the last running cluster member."
+ else:
+ prompt += "This command will shut down a cluster member."
+ prompt += " Are you sure? [N]: "
+
+ confirm = raw_input(prompt)
+ if len(confirm) == 0 or confirm[0].upper() != 'Y':
+ raise Exception("Operation canceled")
+
+ cluster.stopClusterNode(id)
+
+ def stopAll(self):
+ clusters = self._getClusters()
+ if not self.config._force:
+ prompt = "Warning: This command will shut down the entire cluster."
+ prompt += " Are you sure? [N]: "
+
+ confirm = raw_input(prompt)
+ if len(confirm) == 0 or confirm[0].upper() != 'Y':
+ raise Exception("Operation canceled")
+
+ cluster = clusters[0]
+ cluster.stopFullCluster()
+
+ def showConnections(self):
+ clusters = self._getClusters()
+ cluster = clusters[0]
+ memberList = cluster.members.split(";")
+ idList = cluster.memberIDs.split(";")
+ displayList = []
+ hostList = self._getHostList(memberList)
+ self.qmf.delBroker(self.broker)
+ self.broker = None
+ self.brokers = []
+ pattern = re.compile("^\\d+\\.\\d+\\.\\d+\\.\\d+:\\d+$")
+
+ idx = 0
+ for host in hostList:
+ if self.config._showConn == "all" or self.config._showConn == idList[idx] or self.config._delConn:
+ self.brokers.append(self.qmf.addBroker(host, self.config._connTimeout))
+ displayList.append(idList[idx])
+ idx += 1
+
+ idx = 0
+ found = False
+ for broker in self.brokers:
+ if not self.config._delConn:
+ print "Clients on Member: ID=%s:" % displayList[idx]
+ connList = self.qmf.getObjects(_class="connection", _package="org.apache.qpid.broker", _broker=broker)
+ for conn in connList:
+ if pattern.match(conn.address):
+ if self.config._numeric or self.config._delConn:
+ a = conn.address
+ else:
+ tokens = conn.address.split(":")
+ try:
+ hostList = socket.gethostbyaddr(tokens[0])
+ host = hostList[0]
+ except:
+ host = tokens[0]
+ a = host + ":" + tokens[1]
+ if self.config._delConn:
+ tokens = self.config._delConn.split(":")
+ ip = socket.gethostbyname(tokens[0])
+ toDelete = ip + ":" + tokens[1]
+ if a == toDelete:
+ print "Closing connection from client: %s" % a
+ conn.close()
+ found = True
+ else:
+ print " %s" % a
+ idx += 1
+ if not self.config._delConn:
+ print
+ if self.config._delConn and not found:
+ print "Client connection '%s' not found" % self.config._delConn
+
+ for broker in self.brokers:
+ self.qmf.delBroker(broker)
+
+
+def main(argv=None):
+ if argv is None: argv = sys.argv
+ try:
+ config = Config()
+ try:
+ longOpts = ("stop=", "all-stop", "force", "connections=", "all-connections" "del-connection=", "numeric", "timeout=")
+ (optlist, encArgs) = getopt.gnu_getopt(argv[1:], "s:kfCc:d:n", longOpts)
+ except:
+ usage()
+ return 1
+
+ try:
+ encoding = locale.getpreferredencoding()
+ cargs = [a.decode(encoding) for a in encArgs]
+ except:
+ cargs = encArgs
+
+ count = 0
+ for opt in optlist:
+ if opt[0] == "--timeout":
+ config._connTimeout = int(opt[1])
+ if config._connTimeout == 0:
+ config._connTimeout = None
+ if opt[0] == "-s" or opt[0] == "--stop":
+ config._stopId = opt[1]
+ if len(config._stopId.split(":")) != 2:
+ raise Exception("Member ID must be of form: <host or ip>:<number>")
+ count += 1
+ if opt[0] == "-k" or opt[0] == "--all-stop":
+ config._stopAll = True
+ count += 1
+ if opt[0] == "-f" or opt[0] == "--force":
+ config._force = True
+ if opt[0] == "-n" or opt[0] == "--numeric":
+ config._numeric = True
+ if opt[0] == "-C" or opt[0] == "--all-connections":
+ config._showConn = "all"
+ count += 1
+ if opt[0] == "-c" or opt[0] == "--connections":
+ config._showConn = opt[1]
+ if len(config._showConn.split(":")) != 2:
+ raise Exception("Member ID must be of form: <host or ip>:<number>")
+ count += 1
+ if opt[0] == "-d" or opt[0] == "--del-connection":
+ config._delConn = opt[1]
+ if len(config._delConn.split(":")) != 2:
+ raise Exception("Connection must be of form: <host or ip>:<port>")
+ count += 1
+
+ if count > 1:
+ print "Only one command option may be supplied"
+ print
+ usage()
+ return 1
+
+ nargs = len(cargs)
+ bm = BrokerManager(config)
+
+ if nargs == 1:
+ config._host = cargs[0]
+
+ try:
+ bm.SetBroker(config._host)
+ if config._stopId:
+ bm.stopMember(config._stopId)
+ elif config._stopAll:
+ bm.stopAll()
+ elif config._showConn or config._delConn:
+ bm.showConnections()
+ else:
+ bm.overview()
+ except KeyboardInterrupt:
+ print
+ except Exception,e:
+ if str(e).find("connection aborted") > 0:
+ # we expect this when asking the connected broker to shut down
+ return 0
+ raise Exception("Failed: %s - %s" % (e.__class__.__name__, e))
+
+ bm.Disconnect()
+ except Exception, e:
+ print str(e)
+ return 1
+
+if __name__ == "__main__":
+ sys.exit(main())
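The IpAddr.bestAddr logic above picks, for each cluster member, the advertised address whose 32-bit value differs least (by XOR) from the address used for the original QMF connection, i.e. the address most likely to share its subnet. A condensed standalone sketch of that idea (helper names are illustrative, not part of the script):

    import socket

    def as_int(host):
        # Pack a dotted-quad address into a single 32-bit integer.
        a, b, c, d = [int(n) for n in socket.gethostbyname(host).split(".")]
        return (a << 24) + (b << 16) + (c << 8) + d

    def best_addr(reference_host, addr_port_list):
        # Smallest XOR distance favours the longest shared address prefix,
        # which is the heuristic qpid-cluster uses to reach each member.
        ref = as_int(reference_host)
        return min(addr_port_list, key=lambda ap: as_int(ap[0]) ^ ref)

    print best_addr("127.0.0.1", [("10.1.1.7", "5672"), ("127.0.0.1", "5672")])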
diff --git a/python/commands/qpid-config b/python/commands/qpid-config
index cc9315f7ea..39af67f39c 100755
--- a/python/commands/qpid-config
+++ b/python/commands/qpid-config
@@ -22,30 +22,39 @@
import os
import getopt
import sys
-import socket
-import qpid
-from threading import Condition
-from qpid.management import managementClient
-from qpid.managementdata import Broker
-from qpid.peer import Closed
-from qpid.connection import Connection, ConnectionFailed
-from qpid.datatypes import uuid4
-from qpid.util import connect
-from time import sleep
-
-_recursive = False
-_host = "localhost"
-_durable = False
-_fileCount = 8
-_fileSize = 24
-_maxQueueSize = None
-_maxQueueCount= None
-
+import locale
+from qmf.console import Session
+
+_recursive = False
+_host = "localhost"
+_connTimeout = 10
+_altern_ex = None
+_passive = False
+_durable = False
+_clusterDurable = False
+_if_empty = True
+_if_unused = True
+_fileCount = 8
+_fileSize = 24
+_maxQueueSize = None
+_maxQueueCount = None
+_limitPolicy = None
+_order = None
+_msgSequence = False
+_ive = False
+_eventGeneration = None
FILECOUNT = "qpid.file_count"
FILESIZE = "qpid.file_size"
MAX_QUEUE_SIZE = "qpid.max_size"
MAX_QUEUE_COUNT = "qpid.max_count"
+POLICY_TYPE = "qpid.policy_type"
+CLUSTER_DURABLE = "qpid.persist_last_node"
+LVQ = "qpid.last_value_queue"
+LVQNB = "qpid.last_value_queue_no_browse"
+MSG_SEQUENCE = "qpid.msg_sequence"
+IVE = "qpid.ive"
+QUEUE_EVENT_GENERATION = "qpid.queue_event_generation"
def Usage ():
print "Usage: qpid-config [OPTIONS]"
@@ -54,68 +63,99 @@ def Usage ():
print " qpid-config [OPTIONS] add exchange <type> <name> [AddExchangeOptions]"
print " qpid-config [OPTIONS] del exchange <name>"
print " qpid-config [OPTIONS] add queue <name> [AddQueueOptions]"
- print " qpid-config [OPTIONS] del queue <name>"
+ print " qpid-config [OPTIONS] del queue <name> [DelQueueOptions]"
print " qpid-config [OPTIONS] bind <exchange-name> <queue-name> [binding-key]"
print " qpid-config [OPTIONS] unbind <exchange-name> <queue-name> [binding-key]"
print
print "Options:"
+ print " --timeout seconds (10) Maximum time to wait for broker connection"
print " -b [ --bindings ] Show bindings in queue or exchange list"
print " -a [ --broker-addr ] Address (localhost) Address of qpidd broker"
print " broker-addr is in the form: [username/password@] hostname | ip-address [:<port>]"
print " ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost"
print
print "Add Queue Options:"
- print " --durable Queue is durable"
- print " --file-count N (8) Number of files in queue's persistence journal"
- print " --file-size N (24) File size in pages (64Kib/page)"
- print " --max-queue-size N Maximum in-memory queue size as bytes"
- print " --max-queue-count N Maximum in-memory queue size as a number of messages"
+ print " --alternate-exchange [name of the alternate exchange]"
+ print " The alternate-exchange field specifies how messages on this queue should"
+ print " be treated when they are rejected by a subscriber, or when they are"
+ print " orphaned by queue deletion. When present, rejected or orphaned messages"
+ print " MUST be routed to the alternate-exchange. In all cases the messages MUST"
+ print " be removed from the queue."
+ print " --passive Do not actually change the broker state (queue will not be created)"
+ print " --durable Queue is durable"
+ print " --cluster-durable Queue becomes durable if there is only one functioning cluster node"
+ print " --file-count N (8) Number of files in queue's persistence journal"
+ print " --file-size N (24) File size in pages (64Kib/page)"
+ print " --max-queue-size N Maximum in-memory queue size as bytes"
+ print " --max-queue-count N Maximum in-memory queue size as a number of messages"
+ print " --limit-policy [none | reject | flow-to-disk | ring | ring-strict]"
+ print " Action taken when queue limit is reached:"
+ print " none (default) - Use broker's default policy"
+ print " reject - Reject enqueued messages"
+ print " flow-to-disk - Page messages to disk"
+ print " ring - Replace oldest unacquired message with new"
+ print " ring-strict - Replace oldest message, reject if oldest is acquired"
+ print " --order [fifo | lvq | lvq-no-browse]"
+ print " Set queue ordering policy:"
+ print " fifo (default) - First in, first out"
+ print " lvq - Last Value Queue ordering, allows queue browsing"
+ print " lvq-no-browse - Last Value Queue ordering, browsing clients may lose data"
+ print " --generate-queue-events N"
+ print " If set to 1, every enqueue will generate an event that can be processed by"
+ print " registered listeners (e.g. for replication). If set to 2, events will be"
+ print " generated for enqueues and dequeues"
+ print
+ print "Del Queue Options:"
+ print " --force Force delete of queue even if it's currently used or it's not empty"
+ print " --force-if-not-empty Force delete of queue even if it's not empty"
+ print " --force-if-used Force delete of queue even if it's currently used"
+ print
+ print "Add Exchange <type> values:"
+ print " direct Direct exchange for point-to-point communication"
+ print " fanout Fanout exchange for broadcast communication"
+ print " topic Topic exchange that routes messages using binding keys with wildcards"
+ print " headers Headers exchange that matches header fields against the binding keys"
print
print "Add Exchange Options:"
- print " --durable Exchange is durable"
+ print " --alternate-exchange [name of the alternate exchange]"
+ print " In the event that a message cannot be routed, this is the name of the exchange to"
+ print " which the message will be sent. Messages transferred using message.transfer will"
+ print " be routed to the alternate-exchange only if they are sent with the \"none\""
+ print " accept-mode, and the discard-unroutable delivery property is set to false, and"
+ print " there is no queue to route to for the given message according to the bindings"
+ print " on this exchange."
+ print " --passive Do not actually change the broker state (exchange will not be created)"
+ print " --durable Exchange is durable"
+ print " --sequence Exchange will insert a 'qpid.msg_sequence' field in the message header"
+ print " with a value that increments for each message forwarded."
+ print " --ive Exchange will behave as an 'initial-value-exchange', keeping a reference"
+ print " to the last message forwarded and enqueuing that message to newly bound"
+ print " queues."
print
sys.exit (1)
class BrokerManager:
def __init__ (self):
- self.dest = None
- self.src = None
- self.broker = None
-
- def SetBroker (self, broker):
- self.broker = broker
-
- def ConnectToBroker (self):
- try:
- self.sessionId = "%s.%d" % (os.uname()[1], os.getpid())
- self.conn = Connection (connect (self.broker.host, self.broker.port),
- username=self.broker.username, password=self.broker.password)
- self.conn.start ()
- self.session = self.conn.session (self.sessionId)
- self.mclient = managementClient (self.conn.spec)
- self.mchannel = self.mclient.addChannel (self.session)
- except socket.error, e:
- print "Socket Error %s - %s" % (e[0], e[1])
- sys.exit (1)
- except Closed, e:
- print "Connect Failed %d - %s" % (e[0], e[1])
- sys.exit (1)
- except ConnectionFailed, e:
- print "Connect Failed %d - %s" % (e[0], e[1])
- sys.exit(1)
-
- def Disconnect (self):
- self.mclient.removeChannel (self.mchannel)
- self.session.close(timeout=10)
- self.conn.close(timeout=10)
+ self.brokerName = None
+ self.qmf = None
+ self.broker = None
+
+ def SetBroker (self, brokerUrl):
+ self.url = brokerUrl
+ self.qmf = Session()
+ self.broker = self.qmf.addBroker(brokerUrl, _connTimeout)
+ agents = self.qmf.getAgents()
+ for a in agents:
+ if a.getAgentBank() == 0:
+ self.brokerAgent = a
+
+ def Disconnect(self):
+ if self.broker:
+ self.qmf.delBroker(self.broker)
def Overview (self):
- self.ConnectToBroker ()
- mc = self.mclient
- mch = self.mchannel
- mc.syncWaitForStable (mch)
- exchanges = mc.syncGetObjects (mch, "exchange")
- queues = mc.syncGetObjects (mch, "queue")
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
print "Total Exchanges: %d" % len (exchanges)
etype = {}
for ex in exchanges:
@@ -136,30 +176,39 @@ class BrokerManager:
print " non-durable: %d" % (len (queues) - _durable)
def ExchangeList (self, filter):
- self.ConnectToBroker ()
- mc = self.mclient
- mch = self.mchannel
- mc.syncWaitForStable (mch)
- exchanges = mc.syncGetObjects (mch, "exchange")
- print "Durable Type Bindings Exchange Name"
- print "======================================================="
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ caption1 = "Type "
+ caption2 = "Exchange Name"
+ maxNameLen = len(caption2)
+ for ex in exchanges:
+ if self.match(ex.name, filter):
+ if len(ex.name) > maxNameLen: maxNameLen = len(ex.name)
+ print "%s%-*s Attributes" % (caption1, maxNameLen, caption2)
+ line = ""
+ for i in range(((maxNameLen + len(caption1)) / 5) + 5):
+ line += "====="
+ print line
+
for ex in exchanges:
if self.match (ex.name, filter):
- print "%4c %-10s%5d %s" % (YN (ex.durable), ex.type, ex.bindingCount, ex.name)
+ print "%-10s%-*s " % (ex.type, maxNameLen, ex.name),
+ args = ex.arguments
+ if ex.durable: print "--durable",
+ if MSG_SEQUENCE in args and args[MSG_SEQUENCE] == 1: print "--sequence",
+ if IVE in args and args[IVE] == 1: print "--ive",
+ if ex.altExchange:
+ print "--alternate-exchange=%s" % ex._altExchange_.name,
+ print
def ExchangeListRecurse (self, filter):
- self.ConnectToBroker ()
- mc = self.mclient
- mch = self.mchannel
- mc.syncWaitForStable (mch)
- exchanges = mc.syncGetObjects (mch, "exchange")
- bindings = mc.syncGetObjects (mch, "binding")
- queues = mc.syncGetObjects (mch, "queue")
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ bindings = self.qmf.getObjects(_class="binding", _agent=self.brokerAgent)
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
for ex in exchanges:
if self.match (ex.name, filter):
print "Exchange '%s' (%s)" % (ex.name, ex.type)
for bind in bindings:
- if bind.exchangeRef == ex.id:
+ if bind.exchangeRef == ex.getObjectId():
qname = "<unknown>"
queue = self.findById (queues, bind.queueRef)
if queue != None:
@@ -168,43 +217,48 @@ class BrokerManager:
def QueueList (self, filter):
- self.ConnectToBroker ()
- mc = self.mclient
- mch = self.mchannel
- mc.syncWaitForStable (mch)
- queues = mc.syncGetObjects (mch, "queue")
- journals = mc.syncGetObjects (mch, "journal")
- print " Store Size"
- print "Durable AutoDel Excl Bindings (files x file pages) Queue Name"
- print "==========================================================================================="
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
+
+ caption = "Queue Name"
+ maxNameLen = len(caption)
+ for q in queues:
+ if self.match (q.name, filter):
+ if len(q.name) > maxNameLen: maxNameLen = len(q.name)
+ print "%-*s Attributes" % (maxNameLen, caption)
+ line = ""
+ for i in range((maxNameLen / 5) + 5):
+ line += "====="
+ print line
+
for q in queues:
if self.match (q.name, filter):
+ print "%-*s " % (maxNameLen, q.name),
args = q.arguments
- if q.durable and FILESIZE in args and FILECOUNT in args:
- fs = int (args[FILESIZE])
- fc = int (args[FILECOUNT])
- print "%4c%9c%7c%10d%11dx%-14d%s" % \
- (YN (q.durable), YN (q.autoDelete),
- YN (q.exclusive), q.bindingCount, fc, fs, q.name)
- else:
- if not _durable:
- print "%4c%9c%7c%10d %s" % \
- (YN (q.durable), YN (q.autoDelete),
- YN (q.exclusive), q.bindingCount, q.name)
+ if q.durable: print "--durable",
+ if CLUSTER_DURABLE in args and args[CLUSTER_DURABLE] == 1: print "--cluster-durable",
+ if q.autoDelete: print "auto-del",
+ if q.exclusive: print "excl",
+ if FILESIZE in args: print "--file-size=%d" % args[FILESIZE],
+ if FILECOUNT in args: print "--file-count=%d" % args[FILECOUNT],
+ if MAX_QUEUE_SIZE in args: print "--max-queue-size=%d" % args[MAX_QUEUE_SIZE],
+ if MAX_QUEUE_COUNT in args: print "--max-queue-count=%d" % args[MAX_QUEUE_COUNT],
+ if POLICY_TYPE in args: print "--limit-policy=%s" % args[POLICY_TYPE].replace("_", "-"),
+ if LVQ in args and args[LVQ] == 1: print "--order lvq",
+ if LVQNB in args and args[LVQNB] == 1: print "--order lvq-no-browse",
+ if QUEUE_EVENT_GENERATION in args: print "--generate-queue-events=%d" % args[QUEUE_EVENT_GENERATION],
+ if q.altExchange:
+ print "--alternate-exchange=%s" % q._altExchange_.name,
+ print
def QueueListRecurse (self, filter):
- self.ConnectToBroker ()
- mc = self.mclient
- mch = self.mchannel
- mc.syncWaitForStable (mch)
- exchanges = mc.syncGetObjects (mch, "exchange")
- bindings = mc.syncGetObjects (mch, "binding")
- queues = mc.syncGetObjects (mch, "queue")
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ bindings = self.qmf.getObjects(_class="binding", _agent=self.brokerAgent)
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
for queue in queues:
if self.match (queue.name, filter):
print "Queue '%s'" % queue.name
for bind in bindings:
- if bind.queueRef == queue.id:
+ if bind.queueRef == queue.getObjectId():
ename = "<unknown>"
ex = self.findById (exchanges, bind.exchangeRef)
if ex != None:
@@ -216,30 +270,27 @@ class BrokerManager:
def AddExchange (self, args):
if len (args) < 2:
Usage ()
- self.ConnectToBroker ()
etype = args[0]
ename = args[1]
-
- try:
- self.session.exchange_declare (exchange=ename, type=etype, durable=_durable)
- except Closed, e:
- print "Failed:", e
+ declArgs = {}
+ if _msgSequence:
+ declArgs[MSG_SEQUENCE] = 1
+ if _ive:
+ declArgs[IVE] = 1
+ if _altern_ex != None:
+ self.broker.getAmqpSession().exchange_declare (exchange=ename, type=etype, alternate_exchange=_altern_ex, passive=_passive, durable=_durable, arguments=declArgs)
+ else:
+ self.broker.getAmqpSession().exchange_declare (exchange=ename, type=etype, passive=_passive, durable=_durable, arguments=declArgs)
def DelExchange (self, args):
if len (args) < 1:
Usage ()
- self.ConnectToBroker ()
ename = args[0]
-
- try:
- self.session.exchange_delete (exchange=ename)
- except Closed, e:
- print "Failed:", e
+ self.broker.getAmqpSession().exchange_delete (exchange=ename)
def AddQueue (self, args):
if len (args) < 1:
Usage ()
- self.ConnectToBroker ()
qname = args[0]
declArgs = {}
if _durable:
@@ -250,56 +301,64 @@ class BrokerManager:
declArgs[MAX_QUEUE_SIZE] = _maxQueueSize
if _maxQueueCount:
declArgs[MAX_QUEUE_COUNT] = _maxQueueCount
-
- try:
- self.session.queue_declare (queue=qname, durable=_durable, arguments=declArgs)
- except Closed, e:
- print "Failed:", e
+ if _limitPolicy:
+ if _limitPolicy == "none":
+ pass
+ elif _limitPolicy == "reject":
+ declArgs[POLICY_TYPE] = "reject"
+ elif _limitPolicy == "flow-to-disk":
+ declArgs[POLICY_TYPE] = "flow_to_disk"
+ elif _limitPolicy == "ring":
+ declArgs[POLICY_TYPE] = "ring"
+ elif _limitPolicy == "ring-strict":
+ declArgs[POLICY_TYPE] = "ring_strict"
+
+ if _clusterDurable:
+ declArgs[CLUSTER_DURABLE] = 1
+ if _order:
+ if _order == "fifo":
+ pass
+ elif _order == "lvq":
+ declArgs[LVQ] = 1
+ elif _order == "lvq-no-browse":
+ declArgs[LVQNB] = 1
+ if _eventGeneration:
+ declArgs[QUEUE_EVENT_GENERATION] = _eventGeneration
+
+ if _altern_ex != None:
+ self.broker.getAmqpSession().queue_declare (queue=qname, alternate_exchange=_altern_ex, passive=_passive, durable=_durable, arguments=declArgs)
+ else:
+ self.broker.getAmqpSession().queue_declare (queue=qname, passive=_passive, durable=_durable, arguments=declArgs)
def DelQueue (self, args):
if len (args) < 1:
Usage ()
- self.ConnectToBroker ()
qname = args[0]
-
- try:
- self.session.queue_delete (queue=qname)
- except Closed, e:
- print "Failed:", e
+ self.broker.getAmqpSession().queue_delete (queue=qname, if_empty=_if_empty, if_unused=_if_unused)
def Bind (self, args):
if len (args) < 2:
Usage ()
- self.ConnectToBroker ()
ename = args[0]
qname = args[1]
key = ""
if len (args) > 2:
key = args[2]
-
- try:
- self.session.exchange_bind (queue=qname, exchange=ename, binding_key=key)
- except Closed, e:
- print "Failed:", e
+ self.broker.getAmqpSession().exchange_bind (queue=qname, exchange=ename, binding_key=key)
def Unbind (self, args):
if len (args) < 2:
Usage ()
- self.ConnectToBroker ()
ename = args[0]
qname = args[1]
key = ""
if len (args) > 2:
key = args[2]
-
- try:
- self.session.exchange_unbind (queue=qname, exchange=ename, binding_key=key)
- except Closed, e:
- print "Failed:", e
+ self.broker.getAmqpSession().exchange_unbind (queue=qname, exchange=ename, binding_key=key)
def findById (self, items, id):
for item in items:
- if item.id == id:
+ if item.getObjectId() == id:
return item
return None
@@ -315,23 +374,43 @@ def YN (bool):
return 'Y'
return 'N'
+
##
## Main Program
##
try:
- longOpts = ("durable", "bindings", "broker-addr=", "file-count=", "file-size=", "max-queue-size=", "max-queue-count=")
- (optlist, cargs) = getopt.gnu_getopt (sys.argv[1:], "a:b", longOpts)
+ longOpts = ("durable", "cluster-durable", "bindings", "broker-addr=", "file-count=",
+ "file-size=", "max-queue-size=", "max-queue-count=", "limit-policy=",
+ "order=", "sequence", "ive", "generate-queue-events=", "force", "force-if-not-empty",
+ "force_if_used", "alternate-exchange=", "passive", "timeout=")
+ (optlist, encArgs) = getopt.gnu_getopt (sys.argv[1:], "a:b", longOpts)
except:
Usage ()
+try:
+ encoding = locale.getpreferredencoding()
+ cargs = [a.decode(encoding) for a in encArgs]
+except:
+ cargs = encArgs
+
for opt in optlist:
if opt[0] == "-b" or opt[0] == "--bindings":
_recursive = True
if opt[0] == "-a" or opt[0] == "--broker-addr":
_host = opt[1]
+ if opt[0] == "--timeout":
+ _connTimeout = int(opt[1])
+ if _connTimeout == 0:
+ _connTimeout = None
+ if opt[0] == "--alternate-exchange":
+ _altern_ex = opt[1]
+ if opt[0] == "--passive":
+ _passive = True
if opt[0] == "--durable":
_durable = True
+ if opt[0] == "--cluster-durable":
+ _clusterDurable = True
if opt[0] == "--file-count":
_fileCount = int (opt[1])
if opt[0] == "--file-size":
@@ -340,46 +419,77 @@ for opt in optlist:
_maxQueueSize = int (opt[1])
if opt[0] == "--max-queue-count":
_maxQueueCount = int (opt[1])
+ if opt[0] == "--limit-policy":
+ _limitPolicy = opt[1]
+ if _limitPolicy not in ("none", "reject", "flow-to-disk", "ring", "ring-strict"):
+ print "Error: Invalid --limit-policy argument"
+ sys.exit(1)
+ if opt[0] == "--order":
+ _order = opt[1]
+ if _order not in ("fifo", "lvq", "lvq-no-browse"):
+ print "Error: Invalid --order argument"
+ sys.exit(1)
+ if opt[0] == "--sequence":
+ _msgSequence = True
+ if opt[0] == "--ive":
+ _ive = True
+ if opt[0] == "--generate-queue-events":
+ _eventGeneration = int (opt[1])
+ if opt[0] == "--force":
+ _if_empty = False
+ _if_unused = False
+ if opt[0] == "--force-if-not-empty":
+ _if_empty = False
+ if opt[0] == "--force-if-used":
+ _if_unused = False
+
nargs = len (cargs)
bm = BrokerManager ()
-bm.SetBroker (Broker (_host))
-
-if nargs == 0:
- bm.Overview ()
-else:
- cmd = cargs[0]
- modifier = ""
- if nargs > 1:
- modifier = cargs[1]
- if cmd[0] == 'e':
- if _recursive:
- bm.ExchangeListRecurse (modifier)
- else:
- bm.ExchangeList (modifier)
- elif cmd[0] == 'q':
- if _recursive:
- bm.QueueListRecurse (modifier)
- else:
- bm.QueueList (modifier)
- elif cmd == "add":
- if modifier == "exchange":
- bm.AddExchange (cargs[2:])
- elif modifier == "queue":
- bm.AddQueue (cargs[2:])
- else:
- Usage ()
- elif cmd == "del":
- if modifier == "exchange":
- bm.DelExchange (cargs[2:])
- elif modifier == "queue":
- bm.DelQueue (cargs[2:])
+
+try:
+ bm.SetBroker(_host)
+ if nargs == 0:
+ bm.Overview ()
+ else:
+ cmd = cargs[0]
+ modifier = ""
+ if nargs > 1:
+ modifier = cargs[1]
+ if cmd == "exchanges":
+ if _recursive:
+ bm.ExchangeListRecurse (modifier)
+ else:
+ bm.ExchangeList (modifier)
+ elif cmd == "queues":
+ if _recursive:
+ bm.QueueListRecurse (modifier)
+ else:
+ bm.QueueList (modifier)
+ elif cmd == "add":
+ if modifier == "exchange":
+ bm.AddExchange (cargs[2:])
+ elif modifier == "queue":
+ bm.AddQueue (cargs[2:])
+ else:
+ Usage ()
+ elif cmd == "del":
+ if modifier == "exchange":
+ bm.DelExchange (cargs[2:])
+ elif modifier == "queue":
+ bm.DelQueue (cargs[2:])
+ else:
+ Usage ()
+ elif cmd == "bind":
+ bm.Bind (cargs[1:])
+ elif cmd == "unbind":
+ bm.Unbind (cargs[1:])
else:
Usage ()
- elif cmd == "bind":
- bm.Bind (cargs[1:])
- elif cmd == "unbind":
- bm.Unbind (cargs[1:])
- else:
- Usage ()
+except KeyboardInterrupt:
+ print
+except Exception,e:
+ print "Failed: %s: %s" % (e.__class__.__name__, e)
+ sys.exit(1)
+
bm.Disconnect()
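The AddQueue path in this script turns command-line switches into broker declare arguments keyed by the qpid.* constants defined near the top of the file. A trimmed sketch of that mapping (function name and values are illustrative):

    # Sketch: subset of the option -> queue_declare arguments mapping used by qpid-config.
    MAX_QUEUE_SIZE  = "qpid.max_size"
    MAX_QUEUE_COUNT = "qpid.max_count"
    POLICY_TYPE     = "qpid.policy_type"
    LVQ             = "qpid.last_value_queue"

    def build_decl_args(max_size=None, max_count=None, limit_policy=None, order=None):
        args = {}
        if max_size:  args[MAX_QUEUE_SIZE] = max_size
        if max_count: args[MAX_QUEUE_COUNT] = max_count
        # CLI policy names use hyphens; the broker argument value uses underscores.
        if limit_policy and limit_policy != "none":
            args[POLICY_TYPE] = limit_policy.replace("-", "_")
        if order == "lvq":
            args[LVQ] = 1
        return args

    print build_decl_args(max_size=1048576, limit_policy="flow-to-disk", order="lvq")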
diff --git a/python/commands/qpid-printevents b/python/commands/qpid-printevents
new file mode 100755
index 0000000000..0c1b618a1f
--- /dev/null
+++ b/python/commands/qpid-printevents
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import optparse
+import sys
+import socket
+from time import time, strftime, gmtime, sleep
+from qmf.console import Console, Session
+
+class EventConsole(Console):
+ def event(self, broker, event):
+ print event
+
+ def brokerConnected(self, broker):
+ print strftime("%c", gmtime(time())), "NOTIC qpid-printevents:brokerConnected broker=%s" % broker.getUrl()
+
+ def brokerDisconnected(self, broker):
+ print strftime("%c", gmtime(time())), "NOTIC qpid-printevents:brokerDisconnected broker=%s" % broker.getUrl()
+
+
+##
+## Main Program
+##
+def main():
+ _usage = "%prog [options] [broker-addr]..."
+ _description = \
+"""Collect and print events from one or more Qpid message brokers. If no broker-addr is
+supplied, %prog will connect to 'localhost:5672'.
+broker-addr is of the form: [username/password@] hostname | ip-address [:<port>]
+ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost
+"""
+ p = optparse.OptionParser(usage=_usage, description=_description)
+
+ options, arguments = p.parse_args()
+ if len(arguments) == 0:
+ arguments.append("localhost")
+
+ console = EventConsole()
+ session = Session(console, rcvObjects=False, rcvHeartbeats=False, manageConnections=True)
+ brokers = []
+ for host in arguments:
+ brokers.append(session.addBroker(host))
+
+ try:
+ while (True):
+ sleep(10)
+ except KeyboardInterrupt:
+ for broker in brokers:
+ session.delBroker(broker)
+ print
+ sys.exit(0)
+
+if __name__ == '__main__':
+ main()
+
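qpid-printevents is a compact illustration of the qmf.console callback API: a Console subclass is handed to Session, which then invokes event(), brokerConnected() and brokerDisconnected() as things happen. A minimal variant that only counts events, using the same Session/addBroker calls as the script above (sketch only):

    from time import sleep
    from qmf.console import Console, Session

    class CountingConsole(Console):
        events_seen = 0
        def event(self, broker, event):
            # Invoked by the Session's receive thread for every QMF event.
            CountingConsole.events_seen += 1

    session = Session(CountingConsole(), rcvObjects=False, rcvHeartbeats=False, manageConnections=True)
    broker = session.addBroker("localhost")
    try:
        sleep(30)
        print "events seen in 30 seconds:", CountingConsole.events_seen
    finally:
        session.delBroker(broker)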
diff --git a/python/commands/qpid-queue-stats b/python/commands/qpid-queue-stats
index 98dfa7580a..3b8a0dcb19 100755
--- a/python/commands/qpid-queue-stats
+++ b/python/commands/qpid-queue-stats
@@ -26,120 +26,100 @@ import re
import socket
import qpid
from threading import Condition
-from qpid.management import managementClient
-from qpid.managementdata import Broker
+from qmf.console import Session, Console
from qpid.peer import Closed
from qpid.connection import Connection, ConnectionFailed
-from qpid.util import connect
from time import sleep
-class mgmtObject (object):
- """ Generic object that holds the contents of a management object with its
- attributes set as object attributes. """
-
- def __init__ (self, classKey, timestamps, row):
- self.classKey = classKey
- self.timestamps = timestamps
- for cell in row:
- setattr (self, cell[0], cell[1])
-
-
-
-class BrokerManager:
- def __init__ (self):
- self.dest = None
- self.src = None
- self.broker = None
- self.objects = {}
- self.filter = None
-
- def SetBroker (self, broker):
- self.broker = broker
-
- def ConnectToBroker (self):
- try:
- self.sessionId = "%s.%d" % (os.uname()[1], os.getpid())
- self.conn = Connection (connect (self.broker.host, self.broker.port),
- username=self.broker.username, password=self.broker.password)
- self.conn.start ()
- self.session = self.conn.session(self.sessionId)
- self.mclient = managementClient (self.conn.spec, None, self.configCb, self.instCb)
- self.mchannel = self.mclient.addChannel (self.session)
- except socket.error, e:
- print "Socket Error %s - %s" % (e[0], e[1])
- sys.exit (1)
- except Closed, e:
- print "Connect Failed %d - %s" % (e[0], e[1])
- sys.exit (1)
- except ConnectionFailed, e:
- print "Connect Failed %d - %s" % (e[0], e[1])
- sys.exit(1)
-
- def setFilter(self,filter):
- self.filter = filter
-
- def Disconnect (self):
- self.mclient.removeChannel (self.mchannel)
- self.session.close(timeout=10)
- self.conn.close(timeout=10)
-
- def configCb (self, context, classKey, row, timestamps):
- className = classKey[1]
- if className != "queue":
- return
-
- obj = mgmtObject (classKey, timestamps, row)
- if obj.id not in self.objects:
- self.objects[obj.id] = (obj.name, None, None)
-
- def instCb (self, context, classKey, row, timestamps):
- className = classKey[1]
- if className != "queue":
- return
-
- obj = mgmtObject (classKey, timestamps, row)
- if obj.id not in self.objects:
- return
-
- (name, first, last) = self.objects[obj.id]
- if first == None:
- self.objects[obj.id] = (name, obj, None)
- return
-
- if len(self.filter) > 0 :
- match = False
-
- for x in self.filter:
- if x.match(name):
- match = True
- break
- if match == False:
- return
-
- if last == None:
- lastSample = first
- else:
- lastSample = last
-
- self.objects[obj.id] = (name, first, obj)
-
- deltaTime = float (obj.timestamps[0] - lastSample.timestamps[0])
- enqueueRate = float (obj.msgTotalEnqueues - lastSample.msgTotalEnqueues) / (deltaTime / 1000000000.0)
- dequeueRate = float (obj.msgTotalDequeues - lastSample.msgTotalDequeues) / (deltaTime / 1000000000.0)
- print "%-41s%10.2f%11d%13.2f%13.2f" % \
- (name, deltaTime / 1000000000, obj.msgDepth, enqueueRate, dequeueRate)
-
-
- def Display (self):
- self.ConnectToBroker ()
- print "Queue Name Sec Depth Enq Rate Deq Rate"
- print "========================================================================================"
- try:
- while True:
- sleep (1)
- except KeyboardInterrupt:
- pass
- self.Disconnect ()
+class BrokerManager(Console):
+ def __init__(self, host):
+ self.url = host
+ self.objects = {}
+ self.filter = None
+ self.session = Session(self, rcvEvents=False, rcvHeartbeats=False,
+ userBindings=True, manageConnections=True)
+ self.broker = self.session.addBroker(self.url)
+ self.firstError = True
+
+ def setFilter(self,filter):
+ self.filter = filter
+
+ def brokerConnected(self, broker):
+ if not self.firstError:
+ print "*** Broker connected"
+ self.firstError = False
+
+ def brokerDisconnected(self, broker):
+ print "*** Broker connection lost - %s, retrying..." % broker.getError()
+ self.firstError = False
+ self.objects.clear()
+
+ def objectProps(self, broker, record):
+ className = record.getClassKey().getClassName()
+ if className != "queue":
+ return
+
+ id = record.getObjectId().__repr__()
+ if id not in self.objects:
+ self.objects[id] = (record.name, None, None)
+
+ def objectStats(self, broker, record):
+ className = record.getClassKey().getClassName()
+ if className != "queue":
+ return
+
+ id = record.getObjectId().__repr__()
+ if id not in self.objects:
+ return
+
+ (name, first, last) = self.objects[id]
+ if first == None:
+ self.objects[id] = (name, record, None)
+ return
+
+ if len(self.filter) > 0 :
+ match = False
+
+ for x in self.filter:
+ if x.match(name):
+ match = True
+ break
+ if match == False:
+ return
+
+ if last == None:
+ lastSample = first
+ else:
+ lastSample = last
+
+ self.objects[id] = (name, first, record)
+
+ deltaTime = float (record.getTimestamps()[0] - lastSample.getTimestamps()[0])
+ if deltaTime < 1000000000.0:
+ return
+ enqueueRate = float (record.msgTotalEnqueues - lastSample.msgTotalEnqueues) / \
+ (deltaTime / 1000000000.0)
+ dequeueRate = float (record.msgTotalDequeues - lastSample.msgTotalDequeues) / \
+ (deltaTime / 1000000000.0)
+ print "%-41s%10.2f%11d%13.2f%13.2f" % \
+ (name, deltaTime / 1000000000, record.msgDepth, enqueueRate, dequeueRate)
+ sys.stdout.flush()
+
+
+ def Display (self):
+ self.session.bindClass("org.apache.qpid.broker", "queue")
+ print "Queue Name Sec Depth Enq Rate Deq Rate"
+ print "========================================================================================"
+ sys.stdout.flush()
+ try:
+ while True:
+ sleep (1)
+ if self.firstError and self.broker.getError():
+ self.firstError = False
+ print "*** Error: %s, retrying..." % self.broker.getError()
+ except KeyboardInterrupt:
+ print
+ self.session.delBroker(self.broker)
##
## Main Program
@@ -157,8 +137,7 @@ def main():
for s in options.filter.split(","):
filter.append(re.compile(s))
- bm = BrokerManager ()
- bm.SetBroker (Broker (host))
+ bm = BrokerManager(host)
bm.setFilter(filter)
bm.Display()
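The per-queue figures printed by objectStats above are simple deltas between two samples: QMF timestamps are in nanoseconds, so counter differences are divided by the elapsed time scaled back to seconds. A standalone sketch of that arithmetic (sample tuples are illustrative):

    NS_PER_SEC = 1000000000.0

    def rates(first, last):
        # first/last are (timestamp_ns, total_enqueues, total_dequeues) samples.
        delta_t = (last[0] - first[0]) / NS_PER_SEC
        if delta_t <= 0:
            return 0.0, 0.0
        enqueue_rate = (last[1] - first[1]) / delta_t
        dequeue_rate = (last[2] - first[2]) / delta_t
        return enqueue_rate, dequeue_rate

    # Two samples 10 seconds apart: 500 more enqueues, 200 more dequeues.
    print rates((0, 1000, 800), (10 * 1000000000, 1500, 1000))   # -> (50.0, 20.0)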
diff --git a/python/commands/qpid-route b/python/commands/qpid-route
index 3cd9109a6a..9965047000 100755
--- a/python/commands/qpid-route
+++ b/python/commands/qpid-route
@@ -22,280 +22,379 @@
import getopt
import sys
import socket
-import qpid
import os
-from qpid.management import managementClient
-from qpid.managementdata import Broker
-from qpid.peer import Closed
-from qpid.connection import Connection, ConnectionFailed
-from qpid.util import connect
-
-def Usage ():
- print "Usage: qpid-route [OPTIONS] link add <dest-broker> <src-broker>"
- print " qpid-route [OPTIONS] link del <dest-broker> <src-broker>"
- print " qpid-route [OPTIONS] link list [<dest-broker>]"
+import locale
+from qmf.console import Session, BrokerURL
+
+def Usage():
+ print "Usage: qpid-route [OPTIONS] dynamic add <dest-broker> <src-broker> <exchange> [tag] [exclude-list]"
+ print " qpid-route [OPTIONS] dynamic del <dest-broker> <src-broker> <exchange>"
print
print " qpid-route [OPTIONS] route add <dest-broker> <src-broker> <exchange> <routing-key> [tag] [exclude-list]"
print " qpid-route [OPTIONS] route del <dest-broker> <src-broker> <exchange> <routing-key>"
+ print " qpid-route [OPTIONS] queue add <dest-broker> <src-broker> <exchange> <queue>"
+ print " qpid-route [OPTIONS] queue del <dest-broker> <src-broker> <exchange> <queue>"
print " qpid-route [OPTIONS] route list [<dest-broker>]"
print " qpid-route [OPTIONS] route flush [<dest-broker>]"
+ print " qpid-route [OPTIONS] route map [<broker>]"
+ print
+ print " qpid-route [OPTIONS] link add <dest-broker> <src-broker>"
+ print " qpid-route [OPTIONS] link del <dest-broker> <src-broker>"
+ print " qpid-route [OPTIONS] link list [<dest-broker>]"
print
print "Options:"
+ print " --timeout seconds (10) Maximum time to wait for broker connection"
print " -v [ --verbose ] Verbose output"
print " -q [ --quiet ] Quiet output, don't print duplicate warnings"
print " -d [ --durable ] Added configuration shall be durable"
print " -e [ --del-empty-link ] Delete link after deleting last route on the link"
+ print " -s [ --src-local ] Make connection to source broker (push route)"
+ print " --ack N Acknowledge transfers over the bridge in batches of N"
+ print " -t <transport> [ --transport <transport>]"
+ print " Specify transport to use for links, defaults to tcp"
print
print " dest-broker and src-broker are in the form: [username/password@] hostname | ip-address [:<port>]"
print " ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost"
print
- sys.exit (1)
+ sys.exit(1)
-_verbose = False
-_quiet = False
-_durable = False
-_dellink = False
+_verbose = False
+_quiet = False
+_durable = False
+_dellink = False
+_srclocal = False
+_transport = "tcp"
+_ack = 0
+_connTimeout = 10
class RouteManager:
- def __init__ (self, destBroker):
- self.dest = Broker (destBroker)
- self.src = None
+ def __init__(self, localBroker):
+ self.local = BrokerURL(localBroker)
+ self.remote = None
+ self.qmf = Session()
+ self.broker = self.qmf.addBroker(localBroker, _connTimeout)
- def ConnectToBroker (self):
- broker = self.dest
- if _verbose:
- print "Connecting to broker: %s:%d" % (broker.host, broker.port)
- try:
- self.sessionId = "%s.%d" % (os.uname()[1], os.getpid())
- self.conn = Connection (connect (broker.host, broker.port), \
- username=broker.username, password=broker.password)
- self.conn.start ()
- self.session = self.conn.session(self.sessionId)
- self.mclient = managementClient (self.conn.spec)
- self.mch = self.mclient.addChannel (self.session)
- self.mclient.syncWaitForStable (self.mch)
- except socket.error, e:
- print "Socket Error %s - %s" % (e[0], e[1])
- sys.exit (1)
- except Closed, e:
- print "Connect Failed %d - %s" % (e[0], e[1])
- sys.exit (1)
- except ConnectionFailed, e:
- print "Connect Failed %d - %s" % (e[0], e[1])
- sys.exit(1)
-
- def Disconnect (self):
- self.mclient.removeChannel (self.mch)
- self.session.close(timeout=10)
- self.conn.close(timeout=10)
-
- def getLink (self):
- links = self.mclient.syncGetObjects (self.mch, "link")
+ def disconnect(self):
+ self.qmf.delBroker(self.broker)
+
+ def getLink(self):
+ links = self.qmf.getObjects(_class="link")
for link in links:
- if "%s:%d" % (link.host, link.port) == self.src.name ():
+ if self.remote.match(link.host, link.port):
return link
return None
- def AddLink (self, srcBroker):
- self.src = Broker (srcBroker)
- mc = self.mclient
-
- if self.dest.name() == self.src.name():
- print "Linking broker to itself is not permitted"
- sys.exit(1)
+ def addLink(self, remoteBroker):
+ self.remote = BrokerURL(remoteBroker)
+ if self.local.match(self.remote.host, self.remote.port):
+ raise Exception("Linking broker to itself is not permitted")
- brokers = mc.syncGetObjects (self.mch, "broker")
+ brokers = self.qmf.getObjects(_class="broker")
broker = brokers[0]
link = self.getLink()
- if link != None:
- print "Link already exists"
- sys.exit(1)
-
- connectArgs = {}
- connectArgs["host"] = self.src.host
- connectArgs["port"] = self.src.port
- connectArgs["useSsl"] = False
- connectArgs["durable"] = _durable
- if self.src.username == "anonymous":
- connectArgs["authMechanism"] = "ANONYMOUS"
- else:
- connectArgs["authMechanism"] = "PLAIN"
- connectArgs["username"] = self.src.username
- connectArgs["password"] = self.src.password
- res = mc.syncCallMethod (self.mch, broker.id, broker.classKey, "connect", connectArgs)
- if _verbose:
- print "Connect method returned:", res.status, res.statusText
- link = self.getLink ()
-
- def DelLink (self, srcBroker):
- self.src = Broker (srcBroker)
- mc = self.mclient
+ if link == None:
+ if not self.remote.authName or self.remote.authName == "anonymous":
+ mech = "ANONYMOUS"
+ else:
+ mech = "PLAIN"
+ res = broker.connect(self.remote.host, self.remote.port, _durable,
+ mech, self.remote.authName or "", self.remote.authPass or "",
+ _transport)
+ if _verbose:
+ print "Connect method returned:", res.status, res.text
- brokers = mc.syncGetObjects (self.mch, "broker")
+ def delLink(self, remoteBroker):
+ self.remote = BrokerURL(remoteBroker)
+ brokers = self.qmf.getObjects(_class="broker")
broker = brokers[0]
link = self.getLink()
if link == None:
- print "Link not found"
- sys.exit(1)
+ raise Exception("Link not found")
- res = mc.syncCallMethod (self.mch, link.id, link.classKey, "close")
+ res = link.close()
if _verbose:
- print "Close method returned:", res.status, res.statusText
+ print "Close method returned:", res.status, res.text
- def ListLinks (self):
- mc = self.mclient
- links = mc.syncGetObjects (self.mch, "link")
+ def listLinks(self):
+ links = self.qmf.getObjects(_class="link")
if len(links) == 0:
print "No Links Found"
else:
print
- print "Host Port Durable State Last Error"
- print "==================================================================="
+ print "Host Port Transport Durable State Last Error"
+ print "============================================================================="
+ for link in links:
+ print "%-16s%-8d%-13s%c %-18s%s" % \
+ (link.host, link.port, link.transport, YN(link.durable), link.state, link.lastError)
+
+ def mapRoutes(self):
+ qmf = self.qmf
+ print
+ print "Finding Linked Brokers:"
+
+ brokerList = {}
+ brokerList[self.local.name()] = self.broker
+ print " %s... Ok" % self.local
+
+ added = True
+ while added:
+ added = False
+ links = qmf.getObjects(_class="link")
for link in links:
- print "%-16s%-8d %c %-18s%s" % (link.host, link.port, YN(link.durable), link.state, link.lastError)
+ url = BrokerURL("%s:%d" % (link.host, link.port))
+ if url.name() not in brokerList:
+ print " %s..." % url.name(),
+ try:
+ b = qmf.addBroker("%s:%d" % (link.host, link.port), _connTimeout)
+ brokerList[url.name()] = b
+ added = True
+ print "Ok"
+ except Exception, e:
+ print e
+
+ print
+ print "Dynamic Routes:"
+ bridges = qmf.getObjects(_class="bridge", dynamic=True)
+ fedExchanges = []
+ for bridge in bridges:
+ if bridge.src not in fedExchanges:
+ fedExchanges.append(bridge.src)
+ if len(fedExchanges) == 0:
+ print " none found"
+ print
+
+ for ex in fedExchanges:
+ print " Exchange %s:" % ex
+ pairs = []
+ for bridge in bridges:
+ if bridge.src == ex:
+ link = bridge._linkRef_
+ fromUrl = "%s:%s" % (link.host, link.port)
+ toUrl = bridge.getBroker().getUrl()
+ found = False
+ for pair in pairs:
+ if pair.matches(fromUrl, toUrl):
+ found = True
+ if not found:
+ pairs.append(RoutePair(fromUrl, toUrl))
+ for pair in pairs:
+ print " %s" % pair
+ print
- def AddRoute (self, srcBroker, exchange, routingKey, tag, excludes):
- self.src = Broker (srcBroker)
- mc = self.mclient
+ print "Static Routes:"
+ bridges = qmf.getObjects(_class="bridge", dynamic=False)
+ if len(bridges) == 0:
+ print " none found"
+ print
- if self.dest.name() == self.src.name():
- print "Linking broker to itself is not permitted"
- sys.exit(1)
+ for bridge in bridges:
+ link = bridge._linkRef_
+ fromUrl = "%s:%s" % (link.host, link.port)
+ toUrl = bridge.getBroker().getUrl()
+ leftType = "ex"
+ rightType = "ex"
+ if bridge.srcIsLocal:
+ arrow = "=>"
+ left = bridge.src
+ right = bridge.dest
+ if bridge.srcIsQueue:
+ leftType = "queue"
+ else:
+ arrow = "<="
+ left = bridge.dest
+ right = bridge.src
+ if bridge.srcIsQueue:
+ rightType = "queue"
+
+ if bridge.srcIsQueue:
+ print " %s(%s=%s) %s %s(%s=%s)" % \
+ (toUrl, leftType, left, arrow, fromUrl, rightType, right)
+ else:
+ print " %s(%s=%s) %s %s(%s=%s) key=%s" % \
+ (toUrl, leftType, left, arrow, fromUrl, rightType, right, bridge.key)
+ print
+
+ for broker in brokerList:
+ if broker != self.local.name():
+ qmf.delBroker(brokerList[broker])
- brokers = mc.syncGetObjects (self.mch, "broker")
- broker = brokers[0]
- link = self.getLink ()
+ def addRoute(self, remoteBroker, exchange, routingKey, tag, excludes, dynamic=False):
+ if dynamic and _srclocal:
+ raise Exception("--src-local is not permitted on dynamic routes")
+
+ self.addLink(remoteBroker)
+ link = self.getLink()
if link == None:
- if _verbose:
- print "Inter-broker link not found, creating..."
-
- connectArgs = {}
- connectArgs["host"] = self.src.host
- connectArgs["port"] = self.src.port
- connectArgs["useSsl"] = False
- connectArgs["durable"] = _durable
- if self.src.username == "anonymous":
- connectArgs["authMechanism"] = "ANONYMOUS"
- else:
- connectArgs["authMechanism"] = "PLAIN"
- connectArgs["username"] = self.src.username
- connectArgs["password"] = self.src.password
- res = mc.syncCallMethod (self.mch, broker.id, broker.classKey, "connect", connectArgs)
- if _verbose:
- print "Connect method returned:", res.status, res.statusText
- link = self.getLink ()
+ raise Exception("Link failed to create")
+ bridges = self.qmf.getObjects(_class="bridge")
+ for bridge in bridges:
+ if bridge.linkRef == link.getObjectId() and \
+ bridge.dest == exchange and bridge.key == routingKey and not bridge.srcIsQueue:
+ if not _quiet:
+ raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, routingKey))
+ sys.exit(0)
+
+ if _verbose:
+ print "Creating inter-broker binding..."
+ res = link.bridge(_durable, exchange, exchange, routingKey, tag, excludes, False, _srclocal, dynamic, _ack)
+ if res.status != 0:
+ raise Exception(res.text)
+ if _verbose:
+ print "Bridge method returned:", res.status, res.text
+
+ def addQueueRoute(self, remoteBroker, exchange, queue):
+ self.addLink(remoteBroker)
+ link = self.getLink()
if link == None:
- print "Protocol Error - Missing link ID"
- sys.exit (1)
+ raise Exception("Link failed to create")
- bridges = mc.syncGetObjects (self.mch, "bridge")
+ bridges = self.qmf.getObjects(_class="bridge")
for bridge in bridges:
- if bridge.linkRef == link.id and bridge.dest == exchange and bridge.key == routingKey:
+ if bridge.linkRef == link.getObjectId() and \
+ bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue:
if not _quiet:
- print "Duplicate Route - ignoring: %s(%s)" % (exchange, routingKey)
- sys.exit (1)
- sys.exit (0)
+ raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, queue))
+ sys.exit(0)
if _verbose:
print "Creating inter-broker binding..."
- bridgeArgs = {}
- bridgeArgs["durable"] = _durable
- bridgeArgs["src"] = exchange
- bridgeArgs["dest"] = exchange
- bridgeArgs["key"] = routingKey
- bridgeArgs["tag"] = tag
- bridgeArgs["excludes"] = excludes
- bridgeArgs["srcIsQueue"] = 0
- bridgeArgs["srcIsLocal"] = 0
- res = mc.syncCallMethod (self.mch, link.id, link.classKey, "bridge", bridgeArgs)
- if res.status == 4:
- print "Can't create a durable route on a non-durable link"
- sys.exit(1)
+ res = link.bridge(_durable, queue, exchange, "", "", "", True, _srclocal, False, _ack)
+ if res.status != 0:
+ raise Exception(res.text)
if _verbose:
- print "Bridge method returned:", res.status, res.statusText
+ print "Bridge method returned:", res.status, res.text
- def DelRoute (self, srcBroker, exchange, routingKey):
- self.src = Broker (srcBroker)
- mc = self.mclient
+ def delQueueRoute(self, remoteBroker, exchange, queue):
+ self.remote = BrokerURL(remoteBroker)
+ link = self.getLink()
+ if link == None:
+ if not _quiet:
+ raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name()))
+ sys.exit(0)
+
+ bridges = self.qmf.getObjects(_class="bridge")
+ for bridge in bridges:
+ if bridge.linkRef == link.getObjectId() and \
+ bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue:
+ if _verbose:
+ print "Closing bridge..."
+ res = bridge.close()
+ if res.status != 0:
+ raise Exception("Error closing bridge: %d - %s" % (res.status, res.text))
+ if len(bridges) == 1 and _dellink:
+ link = self.getLink()
+ if link == None:
+ sys.exit(0)
+ if _verbose:
+ print "Last bridge on link, closing link..."
+ res = link.close()
+ if res.status != 0:
+ raise Exception("Error closing link: %d - %s" % (res.status, res.text))
+ sys.exit(0)
+ if not _quiet:
+ raise Exception("Route not found")
- link = self.getLink ()
+ def delRoute(self, remoteBroker, exchange, routingKey, dynamic=False):
+ self.remote = BrokerURL(remoteBroker)
+ link = self.getLink()
if link == None:
if not _quiet:
- print "No link found from %s to %s" % (self.src.name(), self.dest.name())
- sys.exit (1)
- sys.exit (0)
+ raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name()))
+ sys.exit(0)
- bridges = mc.syncGetObjects (self.mch, "bridge")
+ bridges = self.qmf.getObjects(_class="bridge")
for bridge in bridges:
- if bridge.linkRef == link.id and bridge.dest == exchange and bridge.key == routingKey:
+ if bridge.linkRef == link.getObjectId() and bridge.dest == exchange and bridge.key == routingKey \
+ and bridge.dynamic == dynamic:
if _verbose:
print "Closing bridge..."
- res = mc.syncCallMethod (self.mch, bridge.id, bridge.classKey, "close")
+ res = bridge.close()
if res.status != 0:
- print "Error closing bridge: %d - %s" % (res.status, res.statusText)
- sys.exit (1)
- if len (bridges) == 1 and _dellink:
- link = self.getLink ()
+ raise Exception("Error closing bridge: %d - %s" % (res.status, res.text))
+ if len(bridges) == 1 and _dellink:
+ link = self.getLink()
if link == None:
- sys.exit (0)
+ sys.exit(0)
if _verbose:
print "Last bridge on link, closing link..."
- res = mc.syncCallMethod (self.mch, link.id, link.classKey, "close")
+ res = link.close()
if res.status != 0:
- print "Error closing link: %d - %s" % (res.status, res.statusText)
- sys.exit (1)
- sys.exit (0)
+ raise Exception("Error closing link: %d - %s" % (res.status, res.text))
+ sys.exit(0)
if not _quiet:
- print "Route not found"
- sys.exit (1)
+ raise Exception("Route not found")
- def ListRoutes (self):
- mc = self.mclient
- links = mc.syncGetObjects (self.mch, "link")
- bridges = mc.syncGetObjects (self.mch, "bridge")
+ def listRoutes(self):
+ links = self.qmf.getObjects(_class="link")
+ bridges = self.qmf.getObjects(_class="bridge")
for bridge in bridges:
myLink = None
for link in links:
- if bridge.linkRef == link.id:
+ if bridge.linkRef == link.getObjectId():
myLink = link
break
if myLink != None:
- print "%s %s:%d %s %s" % (self.dest.name(), myLink.host, myLink.port, bridge.dest, bridge.key)
+ if bridge.dynamic:
+ keyText = "<dynamic>"
+ else:
+ keyText = bridge.key
+ print "%s %s:%d %s %s" % (self.local.name(), myLink.host, myLink.port, bridge.dest, keyText)
- def ClearAllRoutes (self):
- mc = self.mclient
- links = mc.syncGetObjects (self.mch, "link")
- bridges = mc.syncGetObjects (self.mch, "bridge")
+ def clearAllRoutes(self):
+ links = self.qmf.getObjects(_class="link")
+ bridges = self.qmf.getObjects(_class="bridge")
for bridge in bridges:
if _verbose:
myLink = None
for link in links:
- if bridge.linkRef == link.id:
+ if bridge.linkRef == link.getObjectId():
myLink = link
break
if myLink != None:
print "Deleting Bridge: %s:%d %s %s... " % (myLink.host, myLink.port, bridge.dest, bridge.key),
- res = mc.syncCallMethod (self.mch, bridge.id, bridge.classKey, "close")
+ res = bridge.close()
if res.status != 0:
- print "Error: %d - %s" % (res.status, res.statusText)
+ print "Error: %d - %s" % (res.status, res.text)
elif _verbose:
print "Ok"
if _dellink:
- links = mc.syncGetObjects (self.mch, "link")
+ links = self.qmf.getObjects(_class="link")
for link in links:
if _verbose:
print "Deleting Link: %s:%d... " % (link.host, link.port),
- res = mc.syncCallMethod (self.mch, link.id, link.classKey, "close")
+ res = link.close()
if res.status != 0:
- print "Error: %d - %s" % (res.status, res.statusText)
+ print "Error: %d - %s" % (res.status, res.text)
elif _verbose:
print "Ok"
+class RoutePair:
+ def __init__(self, fromUrl, toUrl):
+ self.fromUrl = fromUrl
+ self.toUrl = toUrl
+ self.bidir = False
+
+ def __repr__(self):
+ if self.bidir:
+ delimit = "<=>"
+ else:
+ delimit = " =>"
+ return "%s %s %s" % (self.fromUrl, delimit, self.toUrl)
+
+ def matches(self, fromUrl, toUrl):
+ if fromUrl == self.fromUrl and toUrl == self.toUrl:
+ return True
+ if toUrl == self.fromUrl and fromUrl == self.toUrl:
+ self.bidir = True
+ return True
+ return False
+
+
def YN(val):
if val == 1:
return 'Y'
@@ -306,12 +405,22 @@ def YN(val):
##
try:
- longOpts = ("verbose", "quiet", "durable", "del-empty-link")
- (optlist, cargs) = getopt.gnu_getopt (sys.argv[1:], "vqde", longOpts)
+ longOpts = ("verbose", "quiet", "durable", "del-empty-link", "src-local", "transport=", "ack=", "timeout=")
+ (optlist, encArgs) = getopt.gnu_getopt(sys.argv[1:], "vqdest:", longOpts)
except:
- Usage ()
+ Usage()
+
+try:
+ encoding = locale.getpreferredencoding()
+ cargs = [a.decode(encoding) for a in encArgs]
+except:
+ cargs = encArgs
for opt in optlist:
+ if opt[0] == "--timeout":
+ _connTimeout = int(opt[1])
+ if _connTimeout == 0:
+ _connTimeout = None
if opt[0] == "-v" or opt[0] == "--verbose":
_verbose = True
if opt[0] == "-q" or opt[0] == "--quiet":
@@ -320,52 +429,96 @@ for opt in optlist:
_durable = True
if opt[0] == "-e" or opt[0] == "--del-empty-link":
_dellink = True
-
-nargs = len (cargs)
+ if opt[0] == "-s" or opt[0] == "--src-local":
+ _srclocal = True
+ if opt[0] == "-t" or opt[0] == "--transport":
+ _transport = opt[1]
+ if opt[0] == "--ack":
+ _ack = int(opt[1])
+
+nargs = len(cargs)
if nargs < 2:
- Usage ()
+ Usage()
if nargs == 2:
- destBroker = "localhost"
+ localBroker = "localhost"
else:
- destBroker = cargs[2]
+ if _srclocal:
+ localBroker = cargs[3]
+ remoteBroker = cargs[2]
+ else:
+ localBroker = cargs[2]
+ if nargs > 3:
+ remoteBroker = cargs[3]
group = cargs[0]
cmd = cargs[1]
-rm = RouteManager (destBroker)
-rm.ConnectToBroker ()
-if group == "link":
- if cmd == "add":
- if nargs != 4:
- Usage()
- rm.AddLink (cargs[3])
- elif cmd == "del":
- if nargs != 4:
- Usage()
- rm.DelLink (cargs[3])
- elif cmd == "list":
- rm.ListLinks ()
-
-elif group == "route":
- if cmd == "add":
- if nargs < 6 or nargs > 8:
- Usage ()
-
- tag = ""
- excludes = ""
- if nargs > 6: tag = cargs[6]
- if nargs > 7: excludes = cargs[7]
- rm.AddRoute (cargs[3], cargs[4], cargs[5], tag, excludes)
- elif cmd == "del":
- if nargs != 6:
- Usage ()
+try:
+ rm = RouteManager(localBroker)
+ if group == "link":
+ if cmd == "add":
+ if nargs != 4:
+ Usage()
+ rm.addLink(remoteBroker)
+ elif cmd == "del":
+ if nargs != 4:
+ Usage()
+ rm.delLink(remoteBroker)
+ elif cmd == "list":
+ rm.listLinks()
+
+ elif group == "dynamic":
+ if cmd == "add":
+ if nargs < 5 or nargs > 7:
+ Usage()
+
+ tag = ""
+ excludes = ""
+ if nargs > 5: tag = cargs[5]
+ if nargs > 6: excludes = cargs[6]
+ rm.addRoute(remoteBroker, cargs[4], "", tag, excludes, dynamic=True)
+ elif cmd == "del":
+ if nargs != 5:
+ Usage()
+ else:
+ rm.delRoute(remoteBroker, cargs[4], "", dynamic=True)
+
+ elif group == "route":
+ if cmd == "add":
+ if nargs < 6 or nargs > 8:
+ Usage()
+
+ tag = ""
+ excludes = ""
+ if nargs > 6: tag = cargs[6]
+ if nargs > 7: excludes = cargs[7]
+ rm.addRoute(remoteBroker, cargs[4], cargs[5], tag, excludes, dynamic=False)
+ elif cmd == "del":
+ if nargs != 6:
+ Usage()
+ rm.delRoute(remoteBroker, cargs[4], cargs[5], dynamic=False)
+ elif cmd == "map":
+ rm.mapRoutes()
else:
- rm.DelRoute (cargs[3], cargs[4], cargs[5])
- else:
- if cmd == "list":
- rm.ListRoutes ()
- elif cmd == "flush":
- rm.ClearAllRoutes ()
+ if cmd == "list":
+ rm.listRoutes()
+ elif cmd == "flush":
+ rm.clearAllRoutes()
+ else:
+ Usage()
+
+ elif group == "queue":
+ if nargs != 6:
+ Usage()
+ if cmd == "add":
+ rm.addQueueRoute(remoteBroker, exchange=cargs[4], queue=cargs[5])
+ elif cmd == "del":
+ rm.delQueueRoute(remoteBroker, exchange=cargs[4], queue=cargs[5])
else:
- Usage ()
-rm.Disconnect ()
+ Usage()
+
+except Exception,e:
+ print "Failed: %s - %s" % (e.__class__.__name__, e)
+ sys.exit(1)
+
+rm.disconnect()
diff --git a/python/commands/qpid-stat b/python/commands/qpid-stat
new file mode 100755
index 0000000000..29deeb2342
--- /dev/null
+++ b/python/commands/qpid-stat
@@ -0,0 +1,460 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import getopt
+import sys
+import locale
+import socket
+import re
+from qmf.console import Session, Console
+from qpid.disp import Display, Header, Sorter
+
+_host = "localhost"
+_connTimeout = 10
+_types = ""
+_limit = 50
+_increasing = False
+_sortcol = None
+pattern = re.compile("^\\d+\\.\\d+\\.\\d+\\.\\d+:\\d+$")
+
+def Usage ():
+ print "Usage: qpid-stat [OPTIONS] [broker-addr]"
+ print
+ print " broker-addr is in the form: [username/password@] hostname | ip-address [:<port>]"
+ print " ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost"
+ print
+ print "General Options:"
+ print " --timeout seconds (10) Maximum time to wait for broker connection"
+# print " -n [--numeric] Don't resolve names"
+ print
+ print "Display Options:"
+ print
+ print " -b Show Brokers"
+ print " -c Show Connections"
+# print " -s Show Sessions"
+ print " -e Show Exchanges"
+ print " -q Show Queues"
+ print
+ print " -S [--sort-by] COLNAME Sort by column name"
+ print " -I [--increasing] Sort by increasing value (default = decreasing)"
+ print " -L [--limit] NUM Limit output to NUM rows (default = 50)"
+ print
+ sys.exit (1)
+
+class IpAddr:
+ def __init__(self, text):
+ if text.find("@") != -1:
+ tokens = text.split("@")
+ text = tokens[1]
+ if text.find(":") != -1:
+ tokens = text.split(":")
+ text = tokens[0]
+ self.port = int(tokens[1])
+ else:
+ self.port = 5672
+ self.dottedQuad = socket.gethostbyname(text)
+ nums = self.dottedQuad.split(".")
+ self.addr = (int(nums[0]) << 24) + (int(nums[1]) << 16) + (int(nums[2]) << 8) + int(nums[3])
+
+ def bestAddr(self, addrPortList):
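+        # Choose the candidate address whose numeric value differs least from ours
+        # (XOR distance), a rough heuristic for being on the same subnet.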
+ bestDiff = 0xFFFFFFFFL
+ bestAddr = None
+ for addrPort in addrPortList:
+ diff = IpAddr(addrPort[0]).addr ^ self.addr
+ if diff < bestDiff:
+ bestDiff = diff
+ bestAddr = addrPort
+ return bestAddr
+
+class Broker(object):
+ def __init__(self, qmf, broker):
+ self.broker = broker
+
+ agents = qmf.getAgents()
+ for a in agents:
+ if a.getAgentBank() == 0:
+ self.brokerAgent = a
+
+ bobj = qmf.getObjects(_class="broker", _package="org.apache.qpid.broker", _agent=self.brokerAgent)[0]
+ self.currentTime = bobj.getTimestamps()[0]
+ try:
+ self.uptime = bobj.uptime
+ except:
+ self.uptime = 0
+ self.connections = {}
+ self.sessions = {}
+ self.exchanges = {}
+ self.queues = {}
+ package = "org.apache.qpid.broker"
+
+ list = qmf.getObjects(_class="connection", _package=package, _agent=self.brokerAgent)
+ for conn in list:
+ if pattern.match(conn.address):
+ self.connections[conn.getObjectId()] = conn
+
+ list = qmf.getObjects(_class="session", _package=package, _agent=self.brokerAgent)
+ for sess in list:
+ if sess.connectionRef in self.connections:
+ self.sessions[sess.getObjectId()] = sess
+
+ list = qmf.getObjects(_class="exchange", _package=package, _agent=self.brokerAgent)
+ for exchange in list:
+ self.exchanges[exchange.getObjectId()] = exchange
+
+ list = qmf.getObjects(_class="queue", _package=package, _agent=self.brokerAgent)
+ for queue in list:
+ self.queues[queue.getObjectId()] = queue
+
+ def getName(self):
+ return self.broker.getUrl()
+
+ def getCurrentTime(self):
+ return self.currentTime
+
+ def getUptime(self):
+ return self.uptime
+
+class BrokerManager(Console):
+ def __init__(self):
+ self.brokerName = None
+ self.qmf = None
+ self.broker = None
+ self.brokers = []
+ self.cluster = None
+
+ def SetBroker(self, brokerUrl):
+ self.url = brokerUrl
+ self.qmf = Session()
+ self.broker = self.qmf.addBroker(brokerUrl, _connTimeout)
+ agents = self.qmf.getAgents()
+ for a in agents:
+ if a.getAgentBank() == 0:
+ self.brokerAgent = a
+
+ def Disconnect(self):
+ if self.broker:
+ self.qmf.delBroker(self.broker)
+
+ def _getCluster(self):
+ packages = self.qmf.getPackages()
+ if "org.apache.qpid.cluster" not in packages:
+ return None
+
+ clusters = self.qmf.getObjects(_class="cluster", _agent=self.brokerAgent)
+ if len(clusters) == 0:
+ print "Clustering is installed but not enabled on the broker."
+ return None
+
+ self.cluster = clusters[0]
+
+ def _getHostList(self, urlList):
+ hosts = []
+ hostAddr = IpAddr(_host)
+ for url in urlList:
+ if url.find("amqp:") != 0:
+ raise Exception("Invalid URL 1")
+ url = url[5:]
+ addrs = str(url).split(",")
+ addrList = []
+ for addr in addrs:
+ tokens = addr.split(":")
+ if len(tokens) != 3:
+ raise Exception("Invalid URL 2")
+ addrList.append((tokens[1], tokens[2]))
+
+ # Find the address in the list that is most likely to be in the same subnet as the address
+ # with which we made the original QMF connection. This increases the probability that we will
+ # be able to reach the cluster member.
+
+ best = hostAddr.bestAddr(addrList)
+ bestUrl = best[0] + ":" + best[1]
+ hosts.append(bestUrl)
+ return hosts
+
+ def displaySubs(self, subs, indent, broker=None, conn=None, sess=None, exchange=None, queue=None):
+ if len(subs) == 0:
+ return
+ this = subs[0]
+ remaining = subs[1:]
+ newindent = indent + " "
+ if this == 'b':
+ pass
+ elif this == 'c':
+ if broker:
+ for oid in broker.connections:
+ iconn = broker.connections[oid]
+ self.printConnSub(indent, broker.getName(), iconn)
+ self.displaySubs(remaining, newindent, broker=broker, conn=iconn,
+ sess=sess, exchange=exchange, queue=queue)
+ elif this == 's':
+ pass
+ elif this == 'e':
+ pass
+ elif this == 'q':
+ pass
+ print
+
+ def displayBroker(self, subs):
+ disp = Display(prefix=" ")
+ heads = []
+ heads.append(Header('broker'))
+ heads.append(Header('cluster'))
+ heads.append(Header('uptime', Header.DURATION))
+ heads.append(Header('conn', Header.KMG))
+ heads.append(Header('sess', Header.KMG))
+ heads.append(Header('exch', Header.KMG))
+ heads.append(Header('queue', Header.KMG))
+ rows = []
+ for broker in self.brokers:
+ if self.cluster:
+ ctext = "%s(%s)" % (self.cluster.clusterName, self.cluster.status)
+ else:
+ ctext = "<standalone>"
+ row = (broker.getName(), ctext, broker.getUptime(),
+ len(broker.connections), len(broker.sessions),
+ len(broker.exchanges), len(broker.queues))
+ rows.append(row)
+ title = "Brokers"
+ if _sortcol:
+ sorter = Sorter(heads, rows, _sortcol, _limit, _increasing)
+ dispRows = sorter.getSorted()
+ else:
+ dispRows = rows
+ disp.formattedTable(title, heads, dispRows)
+
+ def displayConn(self, subs):
+ disp = Display(prefix=" ")
+ heads = []
+ if self.cluster:
+ heads.append(Header('broker'))
+ heads.append(Header('client-addr'))
+ heads.append(Header('cproc'))
+ heads.append(Header('cpid'))
+ heads.append(Header('auth'))
+ heads.append(Header('connected', Header.DURATION))
+ heads.append(Header('idle', Header.DURATION))
+ heads.append(Header('msgIn', Header.KMG))
+ heads.append(Header('msgOut', Header.KMG))
+ rows = []
+ for broker in self.brokers:
+ for oid in broker.connections:
+ conn = broker.connections[oid]
+ row = []
+ if self.cluster:
+ row.append(broker.getName())
+ row.append(conn.address)
+ row.append(conn.remoteProcessName)
+ row.append(conn.remotePid)
+ row.append(conn.authIdentity)
+ row.append(broker.getCurrentTime() - conn.getTimestamps()[1])
+ idle = broker.getCurrentTime() - conn.getTimestamps()[0]
+ row.append(broker.getCurrentTime() - conn.getTimestamps()[0])
+ row.append(conn.framesFromClient)
+ row.append(conn.framesToClient)
+ rows.append(row)
+ title = "Connections"
+ if self.cluster:
+ title += " for cluster '%s'" % self.cluster.clusterName
+ if _sortcol:
+ sorter = Sorter(heads, rows, _sortcol, _limit, _increasing)
+ dispRows = sorter.getSorted()
+ else:
+ dispRows = rows
+ disp.formattedTable(title, heads, dispRows)
+
+ def displaySession(self, subs):
+ disp = Display(prefix=" ")
+
+ def displayExchange(self, subs):
+ disp = Display(prefix=" ")
+ heads = []
+ if self.cluster:
+ heads.append(Header('broker'))
+ heads.append(Header("exchange"))
+ heads.append(Header("type"))
+ heads.append(Header("dur", Header.Y))
+ heads.append(Header("bind", Header.KMG))
+ heads.append(Header("msgIn", Header.KMG))
+ heads.append(Header("msgOut", Header.KMG))
+ heads.append(Header("msgDrop", Header.KMG))
+ heads.append(Header("byteIn", Header.KMG))
+ heads.append(Header("byteOut", Header.KMG))
+ heads.append(Header("byteDrop", Header.KMG))
+ rows = []
+ for broker in self.brokers:
+ for oid in broker.exchanges:
+ ex = broker.exchanges[oid]
+ row = []
+ if self.cluster:
+ row.append(broker.getName())
+ row.append(ex.name)
+ row.append(ex.type)
+ row.append(ex.durable)
+ row.append(ex.bindingCount)
+ row.append(ex.msgReceives)
+ row.append(ex.msgRoutes)
+ row.append(ex.msgDrops)
+ row.append(ex.byteReceives)
+ row.append(ex.byteRoutes)
+ row.append(ex.byteDrops)
+ rows.append(row)
+ title = "Exchanges"
+ if self.cluster:
+ title += " for cluster '%s'" % self.cluster.clusterName
+ if _sortcol:
+ sorter = Sorter(heads, rows, _sortcol, _limit, _increasing)
+ dispRows = sorter.getSorted()
+ else:
+ dispRows = rows
+ disp.formattedTable(title, heads, dispRows)
+
+ def displayQueue(self, subs):
+ disp = Display(prefix=" ")
+ heads = []
+ if self.cluster:
+ heads.append(Header('broker'))
+ heads.append(Header("queue"))
+ heads.append(Header("dur", Header.Y))
+ heads.append(Header("autoDel", Header.Y))
+ heads.append(Header("excl", Header.Y))
+ heads.append(Header("msg", Header.KMG))
+ heads.append(Header("msgIn", Header.KMG))
+ heads.append(Header("msgOut", Header.KMG))
+ heads.append(Header("bytes", Header.KMG))
+ heads.append(Header("bytesIn", Header.KMG))
+ heads.append(Header("bytesOut", Header.KMG))
+ heads.append(Header("cons", Header.KMG))
+ heads.append(Header("bind", Header.KMG))
+ rows = []
+ for broker in self.brokers:
+ for oid in broker.queues:
+ q = broker.queues[oid]
+ row = []
+ if self.cluster:
+ row.append(broker.getName())
+ row.append(q.name)
+ row.append(q.durable)
+ row.append(q.autoDelete)
+ row.append(q.exclusive)
+ row.append(q.msgDepth)
+ row.append(q.msgTotalEnqueues)
+ row.append(q.msgTotalDequeues)
+ row.append(q.byteDepth)
+ row.append(q.byteTotalEnqueues)
+ row.append(q.byteTotalDequeues)
+ row.append(q.consumerCount)
+ row.append(q.bindingCount)
+ rows.append(row)
+ title = "Queues"
+ if self.cluster:
+ title += " for cluster '%s'" % self.cluster.clusterName
+ if _sortcol:
+ sorter = Sorter(heads, rows, _sortcol, _limit, _increasing)
+ dispRows = sorter.getSorted()
+ else:
+ dispRows = rows
+ disp.formattedTable(title, heads, dispRows)
+
+ def displayMain(self, main, subs):
+ if main == 'b': self.displayBroker(subs)
+ elif main == 'c': self.displayConn(subs)
+ elif main == 's': self.displaySession(subs)
+ elif main == 'e': self.displayExchange(subs)
+ elif main == 'q': self.displayQueue(subs)
+
+ def display(self):
+ self._getCluster()
+ if self.cluster:
+ memberList = self.cluster.members.split(";")
+ hostList = self._getHostList(memberList)
+ self.qmf.delBroker(self.broker)
+ self.broker = None
+ if _host.find("@") > 0:
+ authString = _host.split("@")[0] + "@"
+ else:
+ authString = ""
+ for host in hostList:
+ b = self.qmf.addBroker(authString + host, _connTimeout)
+ self.brokers.append(Broker(self.qmf, b))
+ else:
+ self.brokers.append(Broker(self.qmf, self.broker))
+
+ self.displayMain(_types[0], _types[1:])
+
+
+##
+## Main Program
+##
+
+try:
+ longOpts = ("top", "numeric", "sort-by=", "limit=", "increasing", "timeout=")
+ (optlist, encArgs) = getopt.gnu_getopt(sys.argv[1:], "bceqS:L:I", longOpts)
+except:
+ Usage()
+
+try:
+ encoding = locale.getpreferredencoding()
+ cargs = [a.decode(encoding) for a in encArgs]
+except:
+ cargs = encArgs
+
+for opt in optlist:
+ if opt[0] == "--timeout":
+ _connTimeout = int(opt[1])
+ if _connTimeout == 0:
+ _connTimeout = None
+ elif opt[0] == "-n" or opt[0] == "--numeric":
+ _numeric = True
+ elif opt[0] == "-S" or opt[0] == "--sort-by":
+ _sortcol = opt[1]
+ elif opt[0] == "-I" or opt[0] == "--increasing":
+ _increasing = True
+ elif opt[0] == "-L" or opt[0] == "--limit":
+ _limit = int(opt[1])
+ elif len(opt[0]) == 2:
+ char = opt[0][1]
+ if "bcseq".find(char) != -1:
+ _types += char
+ else:
+ Usage()
+ else:
+ Usage()
+
+if len(_types) == 0:
+ Usage()
+
+nargs = len(cargs)
+bm = BrokerManager()
+
+if nargs == 1:
+ _host = cargs[0]
+
+try:
+ bm.SetBroker(_host)
+ bm.display()
+except KeyboardInterrupt:
+ print
+except Exception,e:
+ print "Failed: %s - %s" % (e.__class__.__name__, e)
+ sys.exit(1)
+
+bm.Disconnect()
diff --git a/python/commands/qpid-tool b/python/commands/qpid-tool
index 60535c253b..05afcc9732 100755
--- a/python/commands/qpid-tool
+++ b/python/commands/qpid-tool
@@ -24,7 +24,7 @@ import getopt
import sys
import socket
from cmd import Cmd
-from qpid.connection import ConnectionFailed
+from qpid.connection import ConnectionFailed, Timeout
from qpid.managementdata import ManagementData
from shlex import split
from qpid.disp import Display
@@ -148,7 +148,7 @@ class Mcli (Cmd):
self.dataObject.close ()
def Usage ():
- print "Usage: qpid-tool [<target-host[:<tcp-port>]]"
+ print "Usage: qpid-tool [[<username>/<password>@]<target-host>[:<tcp-port>]]"
print
sys.exit (1)
@@ -183,6 +183,8 @@ except ConnectionFailed, e:
except Exception, e:
if str(e).find ("Exchange not found") != -1:
print "Management not enabled on broker: Use '-m yes' option on broker startup."
+ else:
+ print "Failed: %s - %s" % (e.__class__.__name__, e)
sys.exit(1)
# Instantiate the CLI interpreter and launch it.
diff --git a/python/cpp_failing_0-10.txt b/python/cpp_failing_0-10.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/python/cpp_failing_0-10.txt
+++ /dev/null
diff --git a/python/cpp_failing_0-8.txt b/python/cpp_failing_0-8.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/python/cpp_failing_0-8.txt
+++ /dev/null
diff --git a/python/cpp_failing_0-9.txt b/python/cpp_failing_0-9.txt
deleted file mode 100644
index 06c31080fb..0000000000
--- a/python/cpp_failing_0-9.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-tests_0-9.message.MessageTests.test_checkpoint
-tests_0-9.message.MessageTests.test_reject
-tests_0-9.basic.BasicTests.test_get
-
diff --git a/python/doc/test-requirements.txt b/python/doc/test-requirements.txt
index a1ba414eb2..5089b49dbe 100644
--- a/python/doc/test-requirements.txt
+++ b/python/doc/test-requirements.txt
@@ -1,3 +1,22 @@
+###############################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+###############################################################################
+
* start and stop server, possibly in different configurations, should
at least be able to specify host and port
diff --git a/python/examples/README b/python/examples/README
new file mode 100644
index 0000000000..bd30b2a6f4
--- /dev/null
+++ b/python/examples/README
@@ -0,0 +1,319 @@
+Running the Python Examples
+============================
+
+
+Running the Direct Examples
+----------------------------
+
+To run the direct examples, do the following:
+
+1. Make sure that a qpidd broker is running:
+
+ $ ps -eaf | grep qpidd
+
+ If a broker is running, you should see the qpidd process in the output of the above command.
+
+2. Declare a message queue and bind it to an exchange by running declare_queues.py, as follows:
+
+ $ python declare_queues.py
+
+   This program has no output. After it has been run, all messages sent to the amq.direct exchange using the routing key routing_key are sent to the queue named message_queue (a rough sketch of this declaration appears at the end of this section).
+
+3. Publish a series of messages to the amq.direct exchange by running direct_producer.py, as follows:
+
+ $ python direct_producer.py
+
+This program has no output; the messages are routed to the message queue, as instructed by the binding.
+
+4. Read the messages from the message queue using direct_consumer.py or listener.py, as follows:
+
+ $ python direct_consumer.py
+
+ or
+
+ $ python listener.py
+
+You should see the following output:
+
+message 0
+message 1
+message 2
+message 3
+message 4
+message 5
+message 6
+message 7
+message 8
+message 9
+That's all, folks!
+
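+A rough sketch of what declare_queues.py and direct_producer.py do, using the
+same 0-10 session API as the other Python examples in this patch (the shipped
+scripts may differ in detail):
+
+  from qpid.util import connect
+  from qpid.connection import Connection
+  from qpid.datatypes import Message, uuid4
+
+  connection = Connection(sock=connect("127.0.0.1", 5672), username="guest", password="guest")
+  connection.start()
+  session = connection.session(str(uuid4()))
+
+  # declare_queues.py: create the queue and bind it to amq.direct
+  session.queue_declare(queue="message_queue")
+  session.exchange_bind(exchange="amq.direct", queue="message_queue", binding_key="routing_key")
+
+  # direct_producer.py: publish a message; the binding routes it to message_queue
+  delivery_properties = session.delivery_properties(routing_key="routing_key")
+  session.message_transfer(destination="amq.direct", message=Message(delivery_properties, "message 0"))
+
+  session.close(timeout=10)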
+
+
+Running the Fanout Examples
+----------------------------
+
+To run the programs for the Fanout example, do the following:
+
+1. Make sure that a qpidd broker is running:
+
+ $ ps -eaf | grep qpidd
+
+If a broker is running, you should see the qpidd process in the output of the above command.
+
+2. In separate windows, start two or more fanout consumers or fanout listeners as follows:
+
+ $ python fanout_consumer.py
+
+ or
+
+ $ python listener.py
+
+These programs each create a private queue, bind it to the amq.fanout exchange, and wait for messages to arrive on that queue (see the sketch at the end of this section).
+
+3. In a separate window, publish a series of messages to the amq.fanout exchange by running fanout_producer.py, as follows:
+
+ $ python fanout_producer.py
+
+This program has no output; the messages are routed to the message queue, as instructed by the binding.
+
+4. Go to the windows where you are running consumers or listeners. You should see the following output for each listener or consumer:
+
+ message 0
+ message 1
+ message 2
+ message 3
+ message 4
+ message 5
+ message 6
+ message 7
+ message 8
+ message 9
+ That's all, folks!
+
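+A rough sketch of the setup each fanout consumer performs before waiting for
+messages (same 0-10 session API as above; the shipped script may differ in detail):
+
+  # Each consumer declares its own private queue and binds it to amq.fanout;
+  # the fanout exchange delivers a copy of every message to every bound queue.
+  queue_name = "fanout_queue_" + session.name
+  session.queue_declare(queue=queue_name, exclusive=True)
+  session.exchange_bind(exchange="amq.fanout", queue=queue_name)
+
+  local_queue = session.incoming("local_messages")
+  session.message_subscribe(queue=queue_name, destination="local_messages")
+  local_queue.start()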
+
+
+Running the Publish-Subscribe Examples
+---------------------------------------
+
+To run the programs for the Publish-Subscribe example, do the following:
+
+1. Make sure that a qpidd broker is running:
+
+ $ ps -eaf | grep qpidd
+
+If a broker is running, you should see the qpidd process in the output of the above command.
+
+2. In separate windows, start one or more topic subscribers by running topic_subscriber.py, as follows:
+
+ $ python topic_subscriber.py
+
+You will see output similar to this:
+
+ Queues created - please start the topic producer
+ Subscribing local queue 'local_news' to news-53408183-fcee-4b92-950b-90abb297e739'
+ Subscribing local queue 'local_weather' to weather-53408183-fcee-4b92-950b-90abb297e739'
+ Subscribing local queue 'local_usa' to usa-53408183-fcee-4b92-950b-90abb297e739'
+ Subscribing local queue 'local_europe' to europe-53408183-fcee-4b92-950b-90abb297e739'
+ Messages on 'news' queue:
+
+Each topic subscriber creates a set of private queues and binds each one to the amq.topic exchange with a binding key that determines which messages are routed to it (see the sketch at the end of this section).
+
+3. In another window, start the topic publisher, which publishes messages to the amq.topic exchange, as follows:
+
+ $ python topic_publisher.py
+
+This program has no output; the messages are routed to the message queues for each topic subscriber, as specified by the bindings the subscriber created.
+
+4. Go back to the window for each topic consumer. You should see output like this:
+
+ Messages on 'news' queue:
+ usa.news 0
+ usa.news 1
+ usa.news 2
+ usa.news 3
+ usa.news 4
+ europe.news 0
+ europe.news 1
+ europe.news 2
+ europe.news 3
+ europe.news 4
+ That's all, folks!
+ Messages on 'weather' queue:
+ usa.weather 0
+ usa.weather 1
+ usa.weather 2
+ usa.weather 3
+ usa.weather 4
+ europe.weather 0
+ europe.weather 1
+ europe.weather 2
+ europe.weather 3
+ europe.weather 4
+ That's all, folks!
+ Messages on 'usa' queue:
+ usa.news 0
+ usa.news 1
+ usa.news 2
+ usa.news 3
+ usa.news 4
+ usa.weather 0
+ usa.weather 1
+ usa.weather 2
+ usa.weather 3
+ usa.weather 4
+ That's all, folks!
+ Messages on 'europe' queue:
+ europe.news 0
+ europe.news 1
+ europe.news 2
+ europe.news 3
+ europe.news 4
+ europe.weather 0
+ europe.weather 1
+ europe.weather 2
+ europe.weather 3
+ europe.weather 4
+ That's all, folks!
+
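+A rough sketch of the bindings a topic subscriber sets up: one private queue per
+topic, bound to amq.topic with a wildcard binding key ('#' matches any number of
+routing-key segments). Queue and key names follow the output above; the shipped
+script may differ in detail:
+
+  session.queue_declare(queue="local_news", exclusive=True)
+  session.exchange_bind(exchange="amq.topic", queue="local_news", binding_key="#.news")
+
+  session.queue_declare(queue="local_usa", exclusive=True)
+  session.exchange_bind(exchange="amq.topic", queue="local_usa", binding_key="usa.#")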
+
+Running the Request/Response Examples
+--------------------------------------
+
+To run the programs for the Request/Response example, do the following:
+
+1. Make sure that a qpidd broker is running:
+
+ $ ps -eaf | grep qpidd
+
+If a broker is running, you should see the qpidd process in the output of the above command.
+
+2. Run the server.
+
+ $ python server.py
+
+You should see the following output:
+
+ Request server running - run your client now.
+ (Times out after 100 seconds ...)
+
+3. In a separate window, start a client:
+
+ $ python client.py
+
+You should see the following output:
+
+ Request: Twas brillig, and the slithy toves
+ Request: Did gyre and gimble in the wabe.
+ Request: All mimsy were the borogroves,
+ Request: And the mome raths outgrabe.
+ Messages on queue: reply_to:db0f862e-6b36-4e0f-a4b2-ad049eb435ce
+ Response: TWAS BRILLIG, AND THE SLITHY TOVES
+ Response: DID GYRE AND GIMBLE IN THE WABE.
+ Response: ALL MIMSY WERE THE BOROGROVES,
+ Response: AND THE MOME RATHS OUTGRABE.
+ No more messages!
+
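+The client declares a private reply queue and passes its name in the request's
+reply_to property; the server sends the upper-cased body back to that queue. A
+rough sketch of the server's reply step (the same pattern appears in the
+datatypes example elsewhere in this patch; the shipped script may differ in detail):
+
+  reply_to = request.get("message_properties").reply_to
+  delivery_properties = session.delivery_properties(routing_key=reply_to["routing_key"])
+  response = Message(delivery_properties, request.body.upper())
+  session.message_transfer(destination=reply_to["exchange"], message=response)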
+
+Running the XML-based Routing Examples
+---------------------------------------
+
+To run the programs for the XML-based Routing example, do the following:
+
+1. Make sure that a qpidd broker is running:
+
+ $ ps -eaf | grep qpidd
+
+If a broker is running, you should see the qpidd process in the output of the above command.
+
+2. Declare an XML exchange and a message queue, then bind the queue to the exchange by running declare_queues.py, as follows:
+
+ $ python declare_queues.py
+
+This program has no output. After it has been run, all messages sent to the xml exchange using the routing key weather are sent to the queue named message_queue if they satisfy the conditions specified in the following XQuery, which is used in the binding (a rough sketch of the declaration and binding appears at the end of this section):
+
+ let $w := ./weather
+ return $w/station = 'Raleigh-Durham International Airport (KRDU)'
+ and $w/temperature_f > 50
+ and $w/temperature_f - $w/dewpoint > 5
+ and $w/wind_speed_mph > 7
+ and $w/wind_speed_mph < 20
+
+3. Publish a series of messages to the xml exchange by running xml_producer.py, as follows:
+
+ $ python xml_producer.py
+
+The messages are routed to the message queue, as prescribed by the binding. Each message represents a weather report, such as this one:
+
+ <weather>
+ <station>Raleigh-Durham International Airport (KRDU)</station>
+ <wind_speed_mph>16</wind_speed_mph>
+ <temperature_f>70</temperature_f>
+ <dewpoint>35</dewpoint>
+ </weather>
+
+4. Read the messages from the message queue using xml_consumer.py or listener.py, as follows:
+
+ $ python xml_consumer.py
+
+ or
+
+ $ python listener.py
+
+You should see the following output:
+
+<weather><station>Raleigh-Durham International Airport (KRDU)</station>
+<wind_speed_mph>16</wind_speed_mph><temperature_f>70</temperature_f>
+<dewpoint>35</dewpoint></weather>
+
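+A rough sketch of what the XML example's declare_queues.py does: declare an
+exchange of type "xml", declare the queue, and bind them with the XQuery passed
+in the binding arguments (the shipped script may differ in detail):
+
+  xquery = """
+     let $w := ./weather
+     return $w/station = 'Raleigh-Durham International Airport (KRDU)'
+        and $w/temperature_f > 50
+        and $w/temperature_f - $w/dewpoint > 5
+        and $w/wind_speed_mph > 7
+        and $w/wind_speed_mph < 20
+  """
+  session.exchange_declare(exchange="xml", type="xml")
+  session.queue_declare(queue="message_queue")
+  session.exchange_bind(exchange="xml", queue="message_queue", binding_key="weather",
+                        arguments={"xquery": xquery})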
+
+Running the Headers Examples
+-----------------------------
+
+To run the headers examples, do the following:
+
+1. Make sure that a qpidd broker is running:
+
+ $ ps -eaf | grep qpidd
+
+ If a broker is running, you should see the qpidd process in the output of the above command.
+
+2. Declare the message queues and bind them to an exchange by running declare_queues.py, as follows:
+
+ $ python declare_queues.py
+
+   This program has no output. After it has been run, all messages sent to the amq.match exchange with an application-header of {'class': 'first'} will be routed to the queue named "first", and messages with an application-header of {'class': 'second'} will be routed to the queue named "second" (see the sketch at the end of this section).
+
+3. Publish a series of messages to the amq.match exchange by running headers_producer.py, as follows:
+
+ $ python headers_producer.py
+
+This program has no output; the messages are routed to the message queues, as instructed by the bindings.
+
+4. Read the messages from the message queues using headers_consumer.py as follows:
+
+ $ python headers_consumer.py
+
+You should see the following output:
+
+message(first) 0
+message(first) 1
+message(first) 2
+message(first) 3
+message(first) 4
+message(first) 5
+message(first) 6
+message(first) 7
+message(first) 8
+message(first) 9
+That's all, folks!
+message(second) 0
+message(second) 1
+message(second) 2
+message(second) 3
+message(second) 4
+message(second) 5
+message(second) 6
+message(second) 7
+message(second) 8
+message(second) 9
+That's all, folks!
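+
+A rough sketch of the bindings the headers example creates: each queue is bound
+to amq.match with match arguments, where 'x-match': 'all' requires every listed
+header to match (the shipped script may differ in detail):
+
+  session.queue_declare(queue="first")
+  session.exchange_bind(queue="first", exchange="amq.match", binding_key="first",
+                        arguments={'x-match': 'all', 'class': 'first'})
+
+  session.queue_declare(queue="second")
+  session.exchange_bind(queue="second", exchange="amq.match", binding_key="second",
+                        arguments={'x-match': 'all', 'class': 'second'})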
diff --git a/python/examples/api/drain b/python/examples/api/drain
new file mode 100755
index 0000000000..485985f16d
--- /dev/null
+++ b/python/examples/api/drain
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import optparse
+from qpid.messaging import *
+from qpid.util import URL
+
+parser = optparse.OptionParser(usage="usage: %prog [options] ADDRESS ...",
+ description="Drain messages from the supplied address.")
+parser.add_option("-b", "--broker", default="localhost",
+ help="connect to specified BROKER (default %default)")
+parser.add_option("-t", "--timeout", type=float, default=0,
+ help="timeout in seconds to wait before exiting (default %default)")
+parser.add_option("-f", "--forever", action="store_true",
+ help="ignore timeout and wait forever")
+
+opts, args = parser.parse_args()
+
+url = URL(opts.broker)
+if args:
+ addr = args.pop(0)
+else:
+ parser.error("address is required")
+if opts.forever:
+ timeout = None
+else:
+ timeout = opts.timeout
+
+# XXX: should make URL default the port for us
+conn = Connection.open(url.host, url.port or AMQP_PORT,
+ username=url.user, password=url.password)
+ssn = conn.session()
+rcv = ssn.receiver(addr)
+
+while True:
+ try:
+ print rcv.fetch(timeout=timeout)
+ ssn.acknowledge()
+ except Empty:
+ break
+ except ReceiveError, e:
+ print e
+ break
+
+conn.close()
diff --git a/python/examples/api/server b/python/examples/api/server
new file mode 100755
index 0000000000..adb2dcf792
--- /dev/null
+++ b/python/examples/api/server
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import optparse, sys, traceback
+from qpid.messaging import *
+from qpid.util import URL
+from subprocess import Popen, STDOUT, PIPE
+from qpid.log import enable, DEBUG, WARN
+
+parser = optparse.OptionParser(usage="usage: %prog [options] ADDRESS ...",
+ description="handle requests from the supplied address.")
+parser.add_option("-b", "--broker", default="localhost",
+ help="connect to specified BROKER (default %default)")
+parser.add_option("-v", dest="verbose", action="store_true", help="enable logging")
+
+opts, args = parser.parse_args()
+
+if opts.verbose:
+ enable("qpid", DEBUG)
+else:
+ enable("qpid", WARN)
+
+url = URL(opts.broker)
+if args:
+ addr = args.pop(0)
+else:
+ parser.error("address is required")
+
+# XXX: should make URL default the port for us
+conn = Connection.open(url.host, url.port or AMQP_PORT,
+ username=url.user, password=url.password)
+conn.reconnect = True
+ssn = conn.session()
+rcv = ssn.receiver(addr)
+
+def dispatch(msg):
+ msg_type = msg.properties.get("type")
+ if msg_type == "shell":
+ proc = Popen(msg.content, shell=True, stderr=STDOUT, stdin=PIPE, stdout=PIPE)
+ output, _ = proc.communicate()
+ result = Message(output)
+ result.properties["exit"] = proc.returncode
+ elif msg_type == "eval":
+ try:
+ content = eval(msg.content)
+ except:
+ content = traceback.format_exc()
+ result = Message(content)
+ else:
+ result = Message("unrecognized message type: %s" % msg_type)
+ return result
+
+while True:
+ try:
+ msg = rcv.fetch()
+ response = dispatch(msg)
+ snd = ssn.sender(msg.reply_to)
+ try:
+ snd.send(response)
+ except SendError, e:
+ print e
+ snd.close()
+ ssn.acknowledge()
+ except Empty:
+ break
+ except ReceiveError, e:
+ print e
+ break
+
+conn.close()
diff --git a/python/examples/api/spout b/python/examples/api/spout
new file mode 100755
index 0000000000..6a9b2b6e3d
--- /dev/null
+++ b/python/examples/api/spout
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import optparse, time
+from qpid.messaging import *
+from qpid.util import URL
+
+def nameval(st):
+ idx = st.find("=")
+ if idx >= 0:
+ name = st[0:idx]
+ value = st[idx+1:]
+ else:
+ name = st
+ value = None
+ return name, value
+
+parser = optparse.OptionParser(usage="usage: %prog [options] ADDRESS [ CONTENT ... ]",
+ description="Send messages to the supplied address.")
+parser.add_option("-b", "--broker", default="localhost",
+ help="connect to specified BROKER (default %default)")
+parser.add_option("-c", "--count", type=int, default=1,
+ help="stop after count messages have been sent, zero disables (default %default)")
+parser.add_option("-t", "--timeout", type=float, default=None,
+ help="exit after the specified time")
+parser.add_option("-i", "--id", help="use the supplied id instead of generating one")
+parser.add_option("-r", "--reply-to", help="specify reply-to address")
+parser.add_option("-P", "--property", dest="properties", action="append", default=[],
+ help="specify message property")
+parser.add_option("-M", "--map", dest="entries", action="append", default=[],
+ help="specify map entry for message body")
+
+opts, args = parser.parse_args()
+
+url = URL(opts.broker)
+if opts.id is None:
+ spout_id = str(uuid4())
+else:
+ spout_id = opts.id
+if args:
+ addr = args.pop(0)
+else:
+ parser.error("address is required")
+
+content = None
+
+if args:
+ text = " ".join(args)
+else:
+ text = None
+
+if opts.entries:
+ content = {}
+ if text:
+ content["text"] = text
+ for e in opts.entries:
+ name, val = nameval(e)
+ content[name] = val
+else:
+ content = text
+
+# XXX: should make URL default the port for us
+conn = Connection.open(url.host, url.port or AMQP_PORT,
+ username=url.user, password=url.password)
+ssn = conn.session()
+snd = ssn.sender(addr)
+
+count = 0
+start = time.time()
+while (opts.count == 0 or count < opts.count) and \
+ (opts.timeout is None or time.time() - start < opts.timeout):
+ msg = Message(content, reply_to=opts.reply_to)
+ msg.properties["spout-id"] = "%s:%s" % (spout_id, count)
+ for p in opts.properties:
+ name, val = nameval(p)
+ msg.properties[name] = val
+
+ try:
+ snd.send(msg)
+ count += 1
+ print msg
+ except SendError, e:
+ print e
+ break
+
+conn.close()
diff --git a/python/examples/datatypes/client.py b/python/examples/datatypes/client.py
new file mode 100755
index 0000000000..088e529909
--- /dev/null
+++ b/python/examples/datatypes/client.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ client.py
+
+ Client for testing use of Unicode and datatypes.
+
+ Both client and server will be written in C++ and Python.
+ Tests can run clients and servers written in different
+ languages, and they can be run on 32-bit and 64-bit architectures.
+
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+import testdata
+
+#----- Initialization --------------------------------------
+
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+
+#----- Main Body -- ----------------------------------------
+
+# Create a response queue for the server to send responses to. Use the
+# same string as the name of the queue and the name of the routing
+# key.
+
+reply_to = "reply_to:" + session.name
+session.queue_declare(queue=reply_to, exclusive=True)
+session.exchange_bind(exchange="amq.direct", queue=reply_to, binding_key=reply_to)
+
+# Create a local queue and subscribe it to the response queue
+
+local_queue_name = "local_queue"
+queue = session.incoming(local_queue_name)
+
+# Call message_subscribe() to tell the broker to deliver messages from
+# the server's reply_to queue to our local client queue. The server
+# will start delivering messages as soon as message credit is
+# available.
+
+session.message_subscribe(queue=reply_to, destination=local_queue_name)
+queue.start()
+
+# Set up the delivery and message properties, including the test application headers.
+
+delivery_properties = session.delivery_properties(routing_key="request")
+
+message_properties = session.message_properties()
+
+message_properties.content_encoding="text/plain; charset='utf-8'"
+
+testdata.set_application_headers(message_properties)
+message_properties.reply_to = session.reply_to("amq.direct", reply_to)
+
+# deliver the message - remember to encode the Unicode string!
+request = Message(message_properties, delivery_properties, testdata.String_Greek.encode("utf8"))
+session.message_transfer(destination="amq.direct", message=request)
+
+# Now see what messages the server sent to our reply_to queue
+
+try:
+ response = queue.get(timeout=10)
+ content = response.body
+ session.message_accept(RangedSet(response.id))
+ testdata.check_message(response)
+ print "Response: " + content
+except Empty:
+ print "No more messages!"
+ exit(1)
+except Exception, e:
+  print "Unexpected exception:", e
+ exit(1)
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/python/examples/datatypes/server.py b/python/examples/datatypes/server.py
new file mode 100755
index 0000000000..18e6fa4ad7
--- /dev/null
+++ b/python/examples/datatypes/server.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ server.py
+
+ Server for testing use of Unicode and datatypes.
+
+ Both client and server will be written in C++ and Python.
+ Tests can run clients and servers written in different
+ languages, and they can be run on 32-bit and 64-bit architectures.
+"""
+
+import testdata
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Functions -------------------------------------------
+def respond(session, request):
+
+ # The routing key for the response is the request's reply-to
+ # property. The body for the response is the request's body,
+ # converted to upper case.
+
+ testdata.check_message(request)
+
+ message_properties = request.get("message_properties")
+ reply_to = message_properties.reply_to
+
+ testdata.set_application_headers(message_properties)
+
+  if reply_to is None:
+ raise Exception("This message is missing the 'reply_to' property, which is required")
+
+ delivery_properties = session.delivery_properties(routing_key=reply_to["routing_key"])
+ response = Message(delivery_properties, message_properties, testdata.String_Greek.encode("utf8"))
+ print "Sending response ..."
+ session.message_transfer(destination=reply_to["exchange"], message=response)
+
+#----- Initialization --------------------------------------
+
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Main Body --------------------------------------------
+
+# Create a request queue and subscribe to it
+
+session.queue_declare(queue="request", exclusive=True)
+session.exchange_bind(exchange="amq.direct", queue="request", binding_key="request")
+
+local_queue_name = "local_queue"
+
+session.message_subscribe(queue="request", destination=local_queue_name)
+
+queue = session.incoming(local_queue_name)
+queue.start()
+
+# Remind the user to start the client program
+
+print "Request server running - run your client now."
+print "(Times out after 100 seconds ...)"
+sys.stdout.flush()
+
+# Respond to each request
+
+# If we get a message, send it back to the user (as indicated in the
+# ReplyTo property)
+
+while True:
+ try:
+ request = queue.get(timeout=100)
+ session.message_accept(RangedSet(request.id))
+
+ respond(session, request)
+ except Empty:
+ print "No more messages!"
+    break
+
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
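Note: respond() expects the request's reply-to struct to carry "exchange" and "routing_key" entries, which client.py fills in via session.reply_to("amq.direct", reply_to). A small illustrative helper (not part of this patch) that unpacks and validates that struct the same way the code above does:

    def unpack_reply_to(message_properties):
      # Return (exchange, routing_key) from the reply-to struct, or fail loudly.
      reply_to = message_properties.reply_to
      if reply_to is None:
        raise Exception("request is missing the 'reply_to' property")
      return reply_to["exchange"], reply_to["routing_key"]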
diff --git a/python/examples/datatypes/testdata.py b/python/examples/datatypes/testdata.py
new file mode 100644
index 0000000000..cdf140d400
--- /dev/null
+++ b/python/examples/datatypes/testdata.py
@@ -0,0 +1,180 @@
+# -*- encoding: utf-8 -*-
+
+from qpid.datatypes import uuid4, timestamp
+
+#----- Some variables to test boundary conditions on various data types
+
+void = None
+boolean_true = True
+boolean_false = False
+Uint8_0 = 0
+Uint8_max = 255
+Uint16_0 = 0
+Uint16_max = 65535
+Uint32_0 = 0
+Uint32_max = 4294967295
+Uint64_0 = 0
+Uint64_max = 18446744073709551615
+Int8_min = -128
+Int8_0 = 0
+Int8_max = 127
+Int16_min = -32768
+Int16_0 = 0
+Int16_max = 32767
+Int32_min = -2147483648
+Int32_0 = 0
+Int32_max = 2147483647
+Int64_min = -9223372036854775808
+Int64_0 = 0
+Int64_max = 9223372036854775807
+
+Float_pi = 3.14159265
+Float_neg = -1E4
+Float_big = 1267.43233E12
+Float_small = 12.78e-12
+Float_neg0 = -0.0
+Float_pos0 = 0.0
+Float_INF = float('inf')
+Float_Negative_INF = float('-inf')
+
+Double_pi = 3.1415926535897932384626433832795
+Double_neg = -1E4
+Double_big = 1267.43233E12
+Double_small = 12.78e-2
+Double_neg0 = -0.0
+Double_pos0 = 0.0
+Double_INF = float('inf')
+Double_Negative_INF = float('-inf')
+
+char_1byte = u'0024' # $
+char_2byte = u'00A2' # ¢
+char_3byte = u'20AC' # €
+char_4byte = u'10ABCD'
+
+timestamp = timestamp()
+
+UUID = uuid4()
+
+String_Greek = u"ἐξίσταντο δὲ πάντες καὶ διηπόρουν, ἄλλος πρὸς ἄλλον λέγοντες, Τί θέλει τοῦτο εἶναι;"
+
+String_Empty = ""
+
+#----- A few functions ----------------------------------------------------------
+
+def near_enough(float1, float2, delta):
+ return abs(float1-float2) < delta
+
+def set_application_headers(message_properties):
+
+ message_properties.application_headers = {}
+ message_properties.application_headers["void"] = None
+ message_properties.application_headers["boolean_true"] = boolean_true
+ message_properties.application_headers["boolean_false"] = boolean_false
+ message_properties.application_headers["Uint8_0"] = Uint8_0
+ message_properties.application_headers["Uint8_max"] = Uint8_max
+ message_properties.application_headers["Uint16_0"] = Uint16_0
+ message_properties.application_headers["Uint16_max"] = Uint16_max
+ message_properties.application_headers["Uint32_0"] = Uint32_0
+ message_properties.application_headers["Uint32_max"] = Uint32_max
+ message_properties.application_headers["Uint64_0"] = Uint64_0
+# message_properties.application_headers["Uint64_max"] = Uint64_max
+ message_properties.application_headers["Int8_min"] = Int8_min
+ message_properties.application_headers["Int8_0"] = Int8_0
+ message_properties.application_headers["Int8_max"] = Int8_max
+ message_properties.application_headers["Int16_min"] = Int16_min
+ message_properties.application_headers["Int16_0"] = Int16_0
+ message_properties.application_headers["Int16_max"] = Int16_max
+ message_properties.application_headers["Int32_min"] = Int32_min
+ message_properties.application_headers["Int32_0"] = Int32_0
+ message_properties.application_headers["Int32_max"] = Int32_max
+ message_properties.application_headers["Int64_min"] = Int64_min
+ message_properties.application_headers["Int64_0"] = Int64_0
+ message_properties.application_headers["Int64_max"] = Int64_max
+
+ message_properties.application_headers["Float_pi"] = Float_pi
+ message_properties.application_headers["Float_neg"] = Float_neg
+ message_properties.application_headers["Float_big"] = Float_big
+ message_properties.application_headers["Float_small"] = Float_small
+ message_properties.application_headers["Float_neg0"] = Float_neg0
+ message_properties.application_headers["Float_pos0"] = Float_pos0
+ message_properties.application_headers["Float_INF"] = Float_INF
+ message_properties.application_headers["Float_Negative_INF"] = Float_Negative_INF
+
+ message_properties.application_headers["Double_pi"] = Double_pi
+ message_properties.application_headers["Double_neg"] = Double_neg
+ message_properties.application_headers["Double_big"] = Double_big
+ message_properties.application_headers["Double_small"] = Double_small
+ message_properties.application_headers["Double_neg0"] = Double_neg0
+ message_properties.application_headers["Double_pos0"] = Double_pos0
+ message_properties.application_headers["Double_INF"] = Double_INF
+ message_properties.application_headers["Double_Negative_INF"] = Double_Negative_INF
+
+ message_properties.application_headers["char_1byte"] = char_1byte
+ message_properties.application_headers["char_2byte"] = char_2byte
+ message_properties.application_headers["char_3byte"] = char_3byte
+ message_properties.application_headers["char_4byte"] = char_4byte
+
+ message_properties.application_headers["timestamp"] = timestamp
+ message_properties.application_headers["UUID"] = uuid4()
+ message_properties.application_headers["String_Greek"] = String_Greek
+ message_properties.application_headers["String_Empty"] = String_Empty
+
+def check_message(message):
+
+# message_properties = message.message_properties()
+ message_properties = message.get("message_properties")
+ assert message_properties.application_headers["void"] == None
+ assert message_properties.application_headers["boolean_true"] == boolean_true
+ assert message_properties.application_headers["boolean_false"] == boolean_false
+ assert message_properties.application_headers["Uint8_0"] == Uint8_0
+ assert message_properties.application_headers["Uint8_max"] == Uint8_max
+ assert message_properties.application_headers["Uint16_0"] == Uint16_0
+ assert message_properties.application_headers["Uint16_max"] == Uint16_max
+ assert message_properties.application_headers["Uint32_0"] == Uint32_0
+ assert message_properties.application_headers["Uint32_max"] == Uint32_max
+ assert message_properties.application_headers["Uint64_0"] == Uint64_0
+# assert message_properties.application_headers["Uint64_max"] == Uint64_max
+ assert message_properties.application_headers["Int8_min"] == Int8_min
+ assert message_properties.application_headers["Int8_0"] == Int8_0
+ assert message_properties.application_headers["Int8_max"] == Int8_max
+ assert message_properties.application_headers["Int16_min"] == Int16_min
+ assert message_properties.application_headers["Int16_0"] == Int16_0
+ assert message_properties.application_headers["Int16_max"] == Int16_max
+ assert message_properties.application_headers["Int32_min"] == Int32_min
+ assert message_properties.application_headers["Int32_0"] == Int32_0
+ assert message_properties.application_headers["Int32_max"] == Int32_max
+ assert message_properties.application_headers["Int64_min"] == Int64_min
+ assert message_properties.application_headers["Int64_0"] == Int64_0
+ assert message_properties.application_headers["Int64_max"] == Int64_max
+
+# Change floating point comparisons to allow inexactness
+
+ assert near_enough(message_properties.application_headers["Float_pi"], Float_pi, 0.00001)
+ assert near_enough(message_properties.application_headers["Float_neg"], Float_neg, 0.00001)
+ assert near_enough(message_properties.application_headers["Float_big"], Float_big, Float_big/1000000)
+ assert near_enough(message_properties.application_headers["Float_small"], Float_small, 0.00001)
+ assert message_properties.application_headers["Float_neg0"] == Float_neg0
+ assert message_properties.application_headers["Float_pos0"] == Float_pos0
+ assert message_properties.application_headers["Float_INF"] == Float_INF
+ assert message_properties.application_headers["Float_Negative_INF"] == Float_Negative_INF
+
+ assert near_enough(message_properties.application_headers["Double_pi"], Double_pi, 0.00001)
+ assert near_enough(message_properties.application_headers["Double_neg"], Double_neg, 0.00001)
+ assert near_enough(message_properties.application_headers["Double_big"], Double_big, Double_big/1000000)
+ assert near_enough(message_properties.application_headers["Double_small"], Double_small, 0.00001)
+ assert message_properties.application_headers["Double_neg0"] == Double_neg0
+ assert message_properties.application_headers["Double_pos0"] == Double_pos0
+ assert message_properties.application_headers["Double_INF"] == Double_INF
+ assert message_properties.application_headers["Double_Negative_INF"] == Double_Negative_INF
+
+ assert message_properties.application_headers["char_1byte"] == char_1byte
+ assert message_properties.application_headers["char_2byte"] == char_2byte
+ assert message_properties.application_headers["char_3byte"] == char_3byte
+ assert message_properties.application_headers["char_4byte"] == char_4byte
+
+# assert message_properties.application_headers["timestamp"] == timestamp
+# assert message_properties.application_headers["UUID"] == UUID
+ assert message_properties.application_headers["String_Greek"] == String_Greek
+ assert message_properties.application_headers["String_Empty"] == String_Empty
+
+
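Note: near_enough() uses a plain absolute difference, which is why check_message() scales the tolerance for the large values (Float_big, Double_big). A short illustration using the constants above; it assumes testdata is importable, which in turn needs the qpid Python modules on the path:

    from testdata import near_enough, Float_pi, Float_big

    # An absolute tolerance works for small magnitudes ...
    assert near_enough(3.14159, Float_pi, 0.00001)
    # ... but for values around 1.27e15 the tolerance must be relative,
    # here one part in a million, as used in check_message().
    assert near_enough(Float_big * (1 + 1e-7), Float_big, Float_big / 1000000)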
diff --git a/python/examples/direct/declare_queues.py b/python/examples/direct/declare_queues.py
index f0c34fa8c9..13818ee9d7 100755
--- a/python/examples/direct/declare_queues.py
+++ b/python/examples/direct/declare_queues.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
declare_queues.py
@@ -36,7 +54,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/direct/direct_consumer.py b/python/examples/direct/direct_consumer.py
index 23577e9f53..b07e53c5c7 100755
--- a/python/examples/direct/direct_consumer.py
+++ b/python/examples/direct/direct_consumer.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
direct_consumer.py
@@ -34,7 +52,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/direct/direct_producer.py b/python/examples/direct/direct_producer.py
index 870ce66e78..fcbb4675e4 100755
--- a/python/examples/direct/direct_producer.py
+++ b/python/examples/direct/direct_producer.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
direct_producer.py
@@ -34,7 +52,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/direct/listener.py b/python/examples/direct/listener.py
index 66927eca4b..9d06bd3929 100755
--- a/python/examples/direct/listener.py
+++ b/python/examples/direct/listener.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
listener.py
@@ -55,7 +73,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/direct/verify b/python/examples/direct/verify
index 01d81a18a1..92f87bf827 100644
--- a/python/examples/direct/verify
+++ b/python/examples/direct/verify
@@ -1,3 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
clients ./declare_queues.py ./direct_producer.py ./direct_consumer.py
outputs ./declare_queues.py.out ./direct_producer.py.out ./direct_consumer.py.out
diff --git a/python/examples/fanout/fanout_consumer.py b/python/examples/fanout/fanout_consumer.py
index a2b1b30141..0452baa8da 100755
--- a/python/examples/fanout/fanout_consumer.py
+++ b/python/examples/fanout/fanout_consumer.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
fanout_consumer.py
@@ -32,7 +50,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/fanout/fanout_producer.py b/python/examples/fanout/fanout_producer.py
index 3950ca6d2e..c4df252c70 100755
--- a/python/examples/fanout/fanout_producer.py
+++ b/python/examples/fanout/fanout_producer.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
fanout_producer.py
@@ -31,7 +49,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/fanout/listener.py b/python/examples/fanout/listener.py
index 74ae858127..29db402e9d 100755
--- a/python/examples/fanout/listener.py
+++ b/python/examples/fanout/listener.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
listener.py
@@ -52,7 +70,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/fanout/verify b/python/examples/fanout/verify
index 6a3132a94f..9e5c364bfa 100644
--- a/python/examples/fanout/verify
+++ b/python/examples/fanout/verify
@@ -1,3 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
background "Subscribed" ./fanout_consumer.py
background "Subscribed" ./fanout_consumer.py
diff --git a/python/examples/headers/declare_queues.py b/python/examples/headers/declare_queues.py
new file mode 100755
index 0000000000..b3d5c43fe5
--- /dev/null
+++ b/python/examples/headers/declare_queues.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ declare_queues.py
+
+  Creates and binds two queues on the AMQP headers exchange (amq.match).
+
+ All messages with an application header of {'class': 'first'} are sent to queue "first".
+ All messages with an application header of {'class': 'second'} are sent to queue "second".
+"""
+
+# Common includes
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Initialization -----------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Create queues -------------------------------------
+
+# queue_declare() creates an AMQP queue, which is held
+# on the broker. Published messages are sent to the AMQP queue,
+# from which messages are delivered to consumers.
+#
+# exchange_bind() determines which messages are routed to a queue.
+
+session.queue_declare(queue="first")
+session.exchange_bind(exchange="amq.match", queue="first", arguments={'x-match':'any', 'class':'first'})
+
+session.queue_declare(queue="second")
+session.exchange_bind(exchange="amq.match", queue="second", arguments={'x-match':'any', 'class':'second'})
+
+#----- Cleanup ---------------------------------------------
+
+session.close(timeout=10)
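Note: both bindings above use x-match 'any', so a message is routed to the queue if at least one of the other arguments matches its application headers. With x-match 'all' every listed header must match. An illustrative binding in the same style; the queue name and the extra header are made up for this sketch:

    session.queue_declare(queue="third")
    # Routes only messages whose headers contain both class=third and priority=high.
    session.exchange_bind(exchange="amq.match", queue="third",
                          arguments={'x-match':'all', 'class':'third', 'priority':'high'})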
diff --git a/python/examples/headers/headers_consumer.py b/python/examples/headers/headers_consumer.py
new file mode 100755
index 0000000000..8f5ce3c5ff
--- /dev/null
+++ b/python/examples/headers/headers_consumer.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ headers_consumer.py
+
+ This AMQP client reads messages from two message
+ queues named "first" and "second".
+"""
+
+import qpid
+import sys
+import os
+from random import randint
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+
+#----- Initialization --------------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Read from queue --------------------------------------------
+
+# Now let's create two local client queues and tell them to read
+# incoming messages.
+
+# The consumer tag identifies the client-side queue.
+
+local_queue_name_first = "local_queue_first"
+local_queue_name_second = "local_queue_second"
+
+queue_first = session.incoming(local_queue_name_first)
+queue_second = session.incoming(local_queue_name_second)
+
+# Call message_subscribe() to tell the broker to deliver messages
+# from the AMQP queue to these local client queues. The broker will
+# start delivering messages as soon as credit is allocated using
+# queue.start().
+
+session.message_subscribe(queue="first", destination=local_queue_name_first)
+session.message_subscribe(queue="second", destination=local_queue_name_second)
+
+queue_first.start()
+queue_second.start()
+
+# Initialize 'final' and 'content', variables used to identify the last message.
+
+final = "That's all, folks!" # In a message body, signals the last message
+content = "" # Content of the last message read
+
+message = None
+while content != final:
+ message = queue_first.get(timeout=10)
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print content
+
+content = ""
+while content != final:
+ message = queue_second.get(timeout=10)
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print content
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+#
+
+session.close(timeout=10)
diff --git a/python/examples/headers/headers_producer.py b/python/examples/headers/headers_producer.py
new file mode 100755
index 0000000000..43130d5993
--- /dev/null
+++ b/python/examples/headers/headers_producer.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ headers_producer.py
+
+ Publishes messages to an AMQP headers exchange, using
+ various application header values.
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message
+from qpid.datatypes import uuid4
+from qpid.queue import Empty
+
+
+#----- Initialization -----------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Publish some messages ------------------------------
+
+# Create some messages and put them on the broker.
+props_first = session.message_properties(application_headers={'class':'first'})
+props_second = session.message_properties(application_headers={'class':'second'})
+props_third = session.message_properties(application_headers={'class':'third'})
+
+for i in range(10):
+ session.message_transfer(destination="amq.match", message=Message(props_first,"message(first) " + str(i)))
+ session.message_transfer(destination="amq.match", message=Message(props_second,"message(second) " + str(i)))
+ session.message_transfer(destination="amq.match", message=Message(props_third,"message(third) " + str(i)))
+
+session.message_transfer(destination="amq.match", message=Message(props_first,"That's all, folks!"))
+session.message_transfer(destination="amq.match", message=Message(props_second,"That's all, folks!"))
+session.message_transfer(destination="amq.match", message=Message(props_third,"That's all, folks!"))
+
+#----- Cleanup --------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/python/tests_0-9/execution.py b/python/examples/headers/verify
index f2facfe42b..5fe96c5c23 100644
--- a/python/tests_0-9/execution.py
+++ b/python/examples/headers/verify
@@ -17,13 +17,6 @@
# under the License.
#
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-
-class ExecutionTests (TestBase):
- def test_flush(self):
- channel = self.channel
- for i in [1, 2, 3]:
- channel.basic_publish()
- channel.execution_flush()
- assert(channel.completion.wait(channel.completion.command_id, timeout=1))
+# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
+clients ./declare_queues.py ./headers_producer.py ./headers_consumer.py
+outputs ./declare_queues.py.out ./headers_producer.py.out ./headers_consumer.py.out
diff --git a/python/examples/headers/verify.in b/python/examples/headers/verify.in
new file mode 100644
index 0000000000..90ffd0a071
--- /dev/null
+++ b/python/examples/headers/verify.in
@@ -0,0 +1,25 @@
+==== declare_queues.py.out
+==== headers_producer.py.out
+==== headers_consumer.py.out
+message(first) 0
+message(first) 1
+message(first) 2
+message(first) 3
+message(first) 4
+message(first) 5
+message(first) 6
+message(first) 7
+message(first) 8
+message(first) 9
+That's all, folks!
+message(second) 0
+message(second) 1
+message(second) 2
+message(second) 3
+message(second) 4
+message(second) 5
+message(second) 6
+message(second) 7
+message(second) 8
+message(second) 9
+That's all, folks!
diff --git a/python/examples/pubsub/topic_publisher.py b/python/examples/pubsub/topic_publisher.py
index 8cf1b08644..b50d5fa8ca 100755
--- a/python/examples/pubsub/topic_publisher.py
+++ b/python/examples/pubsub/topic_publisher.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
topic_publisher.py
diff --git a/python/examples/pubsub/topic_subscriber.py b/python/examples/pubsub/topic_subscriber.py
index 039cc0c55b..489c7cbb19 100755
--- a/python/examples/pubsub/topic_subscriber.py
+++ b/python/examples/pubsub/topic_subscriber.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
topic_subscriber.py
@@ -63,7 +81,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/pubsub/verify b/python/examples/pubsub/verify
index 963d2e32e1..cf1bade62e 100644
--- a/python/examples/pubsub/verify
+++ b/python/examples/pubsub/verify
@@ -1,3 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
background "Queues created" ./topic_subscriber.py
clients ./topic_publisher.py
diff --git a/python/examples/request-response/client.py b/python/examples/request-response/client.py
index a9ecd5c78f..b29fcf3ea7 100755
--- a/python/examples/request-response/client.py
+++ b/python/examples/request-response/client.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
client.py
@@ -55,7 +73,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/request-response/server.py b/python/examples/request-response/server.py
index 05ee051c57..a80c4541e4 100755
--- a/python/examples/request-response/server.py
+++ b/python/examples/request-response/server.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
server.py
@@ -46,7 +64,7 @@ if len(sys.argv) > 2 :
port=int(sys.argv[2])
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/request-response/verify b/python/examples/request-response/verify
index cf8151d4e4..3c058febb2 100644
--- a/python/examples/request-response/verify
+++ b/python/examples/request-response/verify
@@ -1,3 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
background "Request server running" ./server.py
clients ./client.py
diff --git a/python/examples/xml-exchange/declare_queues.py b/python/examples/xml-exchange/declare_queues.py
index bd17da5013..ca40af5dc5 100755
--- a/python/examples/xml-exchange/declare_queues.py
+++ b/python/examples/xml-exchange/declare_queues.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
declare_queues.py
@@ -35,7 +53,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/xml-exchange/listener.py b/python/examples/xml-exchange/listener.py
index dec824dddf..a56f5d6018 100755
--- a/python/examples/xml-exchange/listener.py
+++ b/python/examples/xml-exchange/listener.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
listener.py
@@ -52,7 +70,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/xml-exchange/verify b/python/examples/xml-exchange/verify
index bf05463f1d..a93a32dc90 100644
--- a/python/examples/xml-exchange/verify
+++ b/python/examples/xml-exchange/verify
@@ -1,3 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
clients ./declare_queues.py ./xml_producer.py ./xml_consumer.py
outputs ./declare_queues.py.out ./xml_producer.py.out ./xml_consumer.py.out
diff --git a/python/examples/xml-exchange/xml_consumer.py b/python/examples/xml-exchange/xml_consumer.py
index 0ab079e7a6..cd89110b05 100755
--- a/python/examples/xml-exchange/xml_consumer.py
+++ b/python/examples/xml-exchange/xml_consumer.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
direct_consumer.py
@@ -34,7 +52,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/examples/xml-exchange/xml_producer.py b/python/examples/xml-exchange/xml_producer.py
index 72c5bdb53a..fa97cab4e1 100755
--- a/python/examples/xml-exchange/xml_producer.py
+++ b/python/examples/xml-exchange/xml_producer.py
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
"""
xml_producer.py
@@ -52,7 +70,7 @@ if len(sys.argv) > 2 :
# Create a connection.
socket = connect(host, port)
-connection = Connection (sock=socket)
+connection = Connection (sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
diff --git a/python/hello-world b/python/hello-world
index 5d513cc57b..efee84059c 100755
--- a/python/hello-world
+++ b/python/hello-world
@@ -1,10 +1,39 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import sys
from qpid.connection import Connection
from qpid.util import connect
from qpid.datatypes import uuid4, Message
+broker = "127.0.0.1"
+port = 5672
+
+if len(sys.argv) > 1: broker = sys.argv[1]
+if len(sys.argv) > 2: port = int(sys.argv[2])
+
+if len(sys.argv) > 3:
+ print >> sys.stderr, "usage: hello-world [ <broker> [ <port> ] ]"
+ sys.exit(1)
+
# connect to the server and start a session
-conn = Connection(connect("127.0.0.1", 5672))
+conn = Connection(connect(broker, port))
conn.start()
ssn = conn.session(str(uuid4()))
diff --git a/python/java_failing_0-8.txt b/python/java_failing_0-8.txt
deleted file mode 100644
index c13b40a42c..0000000000
--- a/python/java_failing_0-8.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-tests_0-8.exchange.RecommendedTypesRuleTests.testTopic
-tests_0-8.exchange.RequiredInstancesRuleTests.testAmqTopic
diff --git a/python/java_failing_0-9.txt b/python/java_failing_0-9.txt
deleted file mode 100644
index 7252d0f496..0000000000
--- a/python/java_failing_0-9.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-ntests.basic.BasicTests.test_qos_prefetch_count
-tests.basic.BasicTests.test_ack
-tests.basic.BasicTests.test_cancel
-tests.basic.BasicTests.test_consume_exclusive
-tests.basic.BasicTests.test_consume_no_local
-tests.basic.BasicTests.test_consume_queue_errors
-tests.basic.BasicTests.test_consume_unique_consumers
-tests.basic.BasicTests.test_get
-tests.basic.BasicTests.test_qos_prefetch_size
-tests.basic.BasicTests.test_recover_requeue
-
-tests.exchange.RecommendedTypesRuleTests.testTopic
-tests.exchange.RequiredInstancesRuleTests.testAmqTopic
-
-tests.message.MessageTests.test_checkpoint
-tests.message.MessageTests.test_reject
-
-tests.broker.BrokerTests.test_ping_pong
diff --git a/python/mllib/__init__.py b/python/mllib/__init__.py
index 39e9363614..9aa1e56e66 100644
--- a/python/mllib/__init__.py
+++ b/python/mllib/__init__.py
@@ -24,6 +24,8 @@ both SGML and XML.
import os, dom, transforms, parsers, sys
import xml.sax, types
+from xml.sax.handler import ErrorHandler
+from xml.sax.xmlreader import InputSource
from cStringIO import StringIO
def transform(node, *args):
@@ -49,15 +51,33 @@ def sgml_parse(source):
p.close()
return p.parser.tree
-def xml_parse(filename):
+class Resolver:
+
+ def __init__(self, path):
+ self.path = path
+
+ def resolveEntity(self, publicId, systemId):
+ for p in self.path:
+ fname = os.path.join(p, systemId)
+ if os.path.exists(fname):
+ source = InputSource(systemId)
+ source.setByteStream(open(fname))
+ return source
+ return InputSource(systemId)
+
+def xml_parse(filename, path=()):
if sys.version_info[0:2] == (2,3):
# XXX: this is for older versions of python
- source = "file://%s" % os.path.abspath(filename)
+ source = "file://%s" % os.path.abspath(filename)
else:
source = filename
- p = parsers.XMLParser()
- xml.sax.parse(source, p)
- return p.parser.tree
+ h = parsers.XMLParser()
+ p = xml.sax.make_parser()
+ p.setContentHandler(h)
+ p.setErrorHandler(ErrorHandler())
+ p.setEntityResolver(Resolver(path))
+ p.parse(source)
+ return h.parser.tree
def sexp(node):
s = transforms.Sexp()
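Note: with the new path argument, xml_parse() hands external entity references (systemId) to the Resolver above, which searches the given directories before falling back to the default resolution. A minimal usage sketch; the file and directory names are hypothetical:

    from mllib import xml_parse

    # Entities referenced from spec.xml are looked up in "." and then "fragments".
    doc = xml_parse("spec.xml", path=(".", "fragments"))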
diff --git a/python/mllib/dom.py b/python/mllib/dom.py
index df2b88322a..486f7082e1 100644
--- a/python/mllib/dom.py
+++ b/python/mllib/dom.py
@@ -148,6 +148,21 @@ class Tag(Node):
if name == k:
return v
+ def _idx(self, attr):
+ idx = 0
+ for k, v in self.attrs:
+ if k == attr:
+ return idx
+ idx += 1
+ return None
+
+ def set_attr(self, name, value):
+ idx = self._idx(name)
+ if idx is None:
+ self.attrs.append((name, value))
+ else:
+ self.attrs[idx] = (name, value)
+
def dispatch(self, f):
try:
attr = "do_" + self.name
diff --git a/python/models/fedsim/__init__.py b/python/models/fedsim/__init__.py
new file mode 100644
index 0000000000..63a3f41f28
--- /dev/null
+++ b/python/models/fedsim/__init__.py
@@ -0,0 +1,19 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
diff --git a/python/models/fedsim/fedsim.py b/python/models/fedsim/fedsim.py
new file mode 100644
index 0000000000..edb6c4c8ed
--- /dev/null
+++ b/python/models/fedsim/fedsim.py
@@ -0,0 +1,434 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+class Sim:
+ def __init__(self):
+ self.brokers = {}
+ self.clients = {}
+ self.errors = 0
+ self.warnings = 0
+
+ def error(self, text):
+ self.errors += 1
+ print "###### Error:", text
+
+ def warning(self, text):
+ self.warnings += 1
+ print "###### Warning:", text
+
+ def end(self):
+ print "========================"
+ print "Errors: %d, Warnings: %d" % (self.errors, self.warnings)
+ print "========================"
+
+ def dumpState(self):
+ print "============================"
+ print "===== Federation State ====="
+ print "============================"
+ for broker in self.brokers:
+ for exchange in self.brokers[broker].exchanges:
+ print "Exchange %s.%s" % (broker, exchange)
+ for key in self.brokers[broker].exchanges[exchange].keys:
+ print " Key %s" % key
+ for queue in self.brokers[broker].exchanges[exchange].keys[key]:
+ print " Queue %s origins=%s" % \
+ (queue.name, self.brokers[broker].exchanges[exchange].keys[key][queue].originList)
+
+ def addBroker(self, name):
+ if name in self.brokers:
+ raise Exception("Broker of same name already exists")
+ broker = Broker(self, name)
+ self.brokers[name] = broker
+ return broker
+
+ def addClient(self, name, broker):
+ if name in self.clients:
+ raise Exception("Client of same name already exists")
+ client = Client(self, name, broker)
+ self.clients[name] = client
+ return client
+
+ def link(self, left, right, bidir=True):
+ print "====== link %s to %s, bidir=%s" % (left.tag, right.tag, bidir)
+ l1 = left.createLink(right)
+ l1.bridge("amq.direct")
+ if bidir:
+ l2 = right.createLink(left)
+ l2.bridge("amq.direct")
+
+ def bind(self, client, key):
+ print "====== bind Client(%s): k=%s" % (client.name, key)
+ client.bind(key)
+
+ def unbind(self, client, key):
+ print "====== unbind Client(%s): k=%s" % (client.name, key)
+ client.unbind(key)
+
+ def sendMessage(self, key, broker, body="Message Body"):
+ print "====== sendMessage: broker=%s k=%s" % (broker.tag, key)
+ msg = Message(key, body)
+ exchange = broker.exchanges["amq.direct"]
+ for client in self.clients:
+ self.clients[client].expect(key);
+ exchange.receive(key, msg, True)
+ for client in self.clients:
+ self.clients[client].checkReception()
+
+
+class Destination:
+ def receive(self, key, msg, fromUser=False):
+ pass
+
+
+class Client(Destination):
+ def __init__(self, sim, name, broker):
+ self.sim = sim
+ self.name = name
+ self.broker = broker
+ self.broker.connect(self)
+ self.queue = self.broker.declare_queue(name)
+ self.subscription = self.broker.subscribe(self, name)
+ self.expected = None
+ self.boundKeys = []
+
+ def bind(self, key):
+ self.boundKeys.append(key)
+ self.broker.bind("amq.direct", self.name, key)
+
+ def unbind(self, key):
+ self.boundKeys.remove(key)
+ self.broker.unbind("amq.direct", self.name, key)
+
+ def receive(self, key, msg, fromUser=False):
+ print "Client(%s) received [%s]: %s" % (self.name, key, msg.body)
+ if self.expected == key:
+ self.expected = None
+ else:
+ self.sim.error("Client(%s) received unexpected message with key [%s]" % \
+ (self.name, self.expected))
+
+ def expect(self, key):
+ if key in self.boundKeys:
+ self.expected = key
+
+ def checkReception(self):
+ if self.expected:
+ self.sim.error("Client(%s) never received message with key [%s]" % \
+ (self.name, self.expected))
+
+class Broker(Client):
+ def __init__(self, sim, tag):
+ self.sim = sim
+ self.tag = tag
+ self.connections = {}
+ self.exchanges = {}
+ self.queues = {}
+ self.subscriptions = {}
+ self.links = {}
+ self.directExchange = self.declare_exchange("amq.direct")
+
+ def connect(self, client):
+ if client in self.connections:
+ raise Exception("Client already connected")
+ self.connections[client] = Connection(client)
+
+ def declare_queue(self, name, tag=None, exclude=None):
+ if name in self.queues:
+ raise Exception("Queue already exists")
+ self.queues[name] = Queue(self, name, tag, exclude)
+
+ def subscribe(self, dest, queueName):
+ if queueName not in self.queues:
+ raise Exception("Queue does not exist")
+ self.queues[queueName].setDest(dest)
+
+ def declare_exchange(self, name):
+ if name in self.exchanges:
+ return
+ exchange = Exchange(self, name)
+ self.exchanges[name] = exchange
+ return exchange
+
+ def bind(self, exchangeName, queueName, key, tagList=[], fedOp=None, origin=None):
+ if exchangeName not in self.exchanges:
+ raise Exception("Exchange not found")
+ if queueName not in self.queues:
+ raise Exception("Queue not found")
+ exchange = self.exchanges[exchangeName]
+ queue = self.queues[queueName]
+ exchange.bind(queue, key, tagList, fedOp, origin)
+
+ def unbind(self, exchangeName, queueName, key):
+ if exchangeName not in self.exchanges:
+ raise Exception("Exchange not found")
+ if queueName not in self.queues:
+ raise Exception("Queue not found")
+ exchange = self.exchanges[exchangeName]
+ queue = self.queues[queueName]
+ exchange.unbind(queue, key)
+
+ def createLink(self, other):
+ if other in self.links:
+ raise Exception("Peer broker already linked")
+ link = Link(self, other)
+ self.links[other] = link
+ return link
+
+
+class Connection:
+ def __init__(self, client):
+ self.client = client
+
+
+class Exchange(Destination):
+ def __init__(self, broker, name):
+ self.broker = broker
+ self.sim = broker.sim
+ self.name = name
+ self.keys = {}
+ self.bridges = []
+
+ def bind(self, queue, key, tagList, fedOp, origin):
+ if not fedOp: fedOp = "bind"
+ print "Exchange(%s.%s) bind q=%s, k=%s, tags=%s, op=%s, origin=%s" % \
+ (self.broker.tag, self.name, queue.name, key, tagList, fedOp, origin),
+
+ if self.broker.tag in tagList:
+ print "(tag ignored)"
+ return
+
+ if fedOp == "bind" or fedOp == "unbind":
+ if key not in self.keys:
+ self.keys[key] = {}
+ queueMap = self.keys[key]
+
+ if fedOp == "bind":
+ ##
+ ## Add local or federation binding case
+ ##
+ if queue in queueMap:
+ if origin and origin in queueMap[queue].originList:
+ print "(dup ignored)"
+ elif origin:
+ queueMap[queue].originList.append(origin)
+ print "(origin added)"
+ else:
+ binding = Binding(origin)
+ queueMap[queue] = binding
+ print "(binding added)"
+
+ elif fedOp == "unbind":
+ ##
+ ## Delete federation binding case
+ ##
+ if queue in queueMap:
+ binding = queueMap[queue]
+ if origin and origin in binding.originList:
+ binding.originList.remove(origin)
+ if len(binding.originList) == 0:
+ queueMap.pop(queue)
+ if len(queueMap) == 0:
+ self.keys.pop(key)
+ print "(last origin del)"
+ else:
+ print "(removed origin)"
+ else:
+ print "(origin not found)"
+ else:
+ print "(queue not found)"
+
+ elif fedOp == "reorigin":
+ print "(ok)"
+ self.reorigin()
+
+ elif fedOp == "hello":
+ print "(ok)"
+
+ else:
+ raise Exception("Unknown fed-opcode '%s'" % fedOp)
+
+ newTagList = []
+ newTagList.append(self.broker.tag)
+ for tag in tagList:
+ newTagList.append(tag)
+ if origin:
+ propOrigin = origin
+ else:
+ propOrigin = self.broker.tag
+
+ for bridge in self.bridges:
+ if bridge.isDynamic():
+ bridge.propagate(key, newTagList, fedOp, propOrigin)
+
+ def reorigin(self):
+ myTag = []
+ myTag.append(self.broker.tag)
+ for key in self.keys:
+ queueMap = self.keys[key]
+ found = False
+ for queue in queueMap:
+ binding = queueMap[queue]
+ if binding.isLocal():
+ found = True
+ if found:
+ for bridge in self.bridges:
+ if bridge.isDynamic():
+ bridge.propagate(key, myTag, "bind", self.broker.tag)
+
+ def unbind(self, queue, key):
+ print "Exchange(%s.%s) unbind q=%s, k=%s" % (self.broker.tag, self.name, queue.name, key),
+ if key not in self.keys:
+ print "(key not known)"
+ return
+ queueMap = self.keys[key]
+ if queue not in queueMap:
+ print "(queue not bound)"
+ return
+ queueMap.pop(queue)
+ if len(queueMap) == 0:
+ self.keys.pop(key)
+ print "(ok, remove bound-key)"
+ else:
+ print "(ok)"
+
+ count = 0
+ for queue in queueMap:
+ if len(queueMap[queue].originList) == 0:
+ count += 1
+
+ if count == 0:
+ myTag = []
+ myTag.append(self.broker.tag)
+ for bridge in self.bridges:
+ if bridge.isDynamic():
+ bridge.propagate(key, myTag, "unbind", self.broker.tag)
+
+ def receive(self, key, msg, fromUser=False):
+ sent = False
+ if key in self.keys:
+ queueMap = self.keys[key]
+ for queue in queueMap:
+ if queue.enqueue(msg):
+ sent = True
+ if not sent and not fromUser:
+ self.sim.warning("Exchange(%s.%s) received unroutable message: k=%s" % \
+ (self.broker.tag, self.name, key))
+
+ def addDynamicBridge(self, bridge):
+ if bridge in self.bridges:
+ raise Exception("Dynamic bridge already added to exchange")
+ self.bridges.append(bridge)
+
+ for b in self.bridges:
+ if b != bridge:
+ b.sendReorigin()
+ self.reorigin()
+
+class Queue:
+ def __init__(self, broker, name, tag=None, exclude=None):
+ self.broker = broker
+ self.name = name
+ self.tag = tag
+ self.exclude = exclude
+ self.dest = None
+
+ def setDest(self, dest):
+ self.dest = dest
+
+ def enqueue(self, msg):
+ print "Queue(%s.%s) rcvd k=%s, tags=%s" % (self.broker.tag, self.name, msg.key, msg.tags),
+ if self.dest == None:
+ print "(dropped, no dest)"
+ return False
+ if self.exclude and msg.tagFound(self.exclude):
+ print "(dropped, tag)"
+ return False
+ if self.tag:
+ msg.appendTag(self.tag)
+ print "(ok)"
+ self.dest.receive(msg.key, msg)
+ return True
+
+
+class Binding:
+ def __init__(self, origin):
+ self.originList = []
+ if origin:
+ self.originList.append(origin)
+
+ def isLocal(self):
+ return len(self.originList) == 0
+
+
+class Link:
+ def __init__(self, local, remote):
+ self.local = local
+ self.remote = remote
+ self.remote.connect(self)
+ self.bridges = []
+
+ def bridge(self, exchangeName):
+ bridge = Bridge(self, exchangeName)
+
+
+class Bridge:
+ def __init__(self, link, exchangeName):
+ self.link = link
+ self.exchangeName = exchangeName
+ if self.exchangeName not in link.local.exchanges:
+ raise Exception("Exchange not found")
+ self.exchange = link.local.exchanges[self.exchangeName]
+ self.queueName = "bridge." + link.local.tag
+ self.link.remote.declare_queue(self.queueName, self.link.remote.tag, self.link.local.tag)
+ self.link.remote.subscribe(self.exchange, self.queueName)
+ self.exchange.addDynamicBridge(self)
+
+ def isDynamic(self):
+ return True
+
+ def localTag(self):
+ return self.link.local.tag
+
+ def remoteTag(self):
+ return self.link.remote.tag
+
+ def propagate(self, key, tagList, fedOp, origin):
+ if self.link.remote.tag not in tagList:
+ self.link.remote.bind(self.exchangeName, self.queueName, key, tagList, fedOp, origin)
+
+ def sendReorigin(self):
+ myTag = []
+ myTag.append(self.link.local.tag)
+ self.link.remote.bind(self.exchangeName, self.queueName, "", myTag, "reorigin", "")
+
+
+class Message:
+ def __init__(self, key, body):
+ self.key = key
+ self.body = body
+ self.tags = []
+
+ def appendTag(self, tag):
+ if tag not in self.tags:
+ self.tags.append(tag)
+
+ def tagFound(self, tag):
+ return tag in self.tags
+
+
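The test scripts that follow exercise this simulator against larger topologies; a more compact sketch of the same flow, using only the Sim API defined above and arbitrary broker and client names:

    from fedsim import Sim

    sim = Sim()
    b1 = sim.addBroker("B1")
    b2 = sim.addBroker("B2")
    c2 = sim.addClient("C2", b2)

    sim.link(b1, b2)          # bridge amq.direct in both directions
    sim.bind(c2, "K")         # binding propagates to B1 with origin "B2"
    sim.sendMessage("K", b1)  # routed over the bridge queue to C2
    sim.sendMessage("K", b2)  # delivered locally to C2

    sim.dumpState()
    sim.end()
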
diff --git a/python/models/fedsim/testBig.py b/python/models/fedsim/testBig.py
new file mode 100644
index 0000000000..416a086983
--- /dev/null
+++ b/python/models/fedsim/testBig.py
@@ -0,0 +1,88 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from fedsim import Sim
+
+sim = Sim()
+b1 = sim.addBroker("B1")
+b2 = sim.addBroker("B2")
+b3 = sim.addBroker("B3")
+b4 = sim.addBroker("B4")
+b5 = sim.addBroker("B5")
+b6 = sim.addBroker("B6")
+b7 = sim.addBroker("B7")
+b8 = sim.addBroker("B8")
+
+c1 = sim.addClient("C1", b1)
+c3 = sim.addClient("C3", b3)
+c4 = sim.addClient("C4", b4)
+c5 = sim.addClient("C5", b5)
+c8 = sim.addClient("C8", b8)
+
+sim.link(b1, b2)
+sim.link(b3, b2)
+sim.link(b4, b2)
+sim.link(b5, b2)
+
+sim.link(b6, b7)
+sim.link(b6, b8)
+
+sim.bind(c1, "A")
+sim.bind(c3, "B")
+sim.bind(c8, "A")
+
+sim.link(b5, b6)
+
+sim.bind(c4, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+sim.sendMessage("A", b4)
+sim.sendMessage("A", b5)
+sim.sendMessage("A", b6)
+sim.sendMessage("A", b7)
+sim.sendMessage("A", b8)
+
+sim.sendMessage("B", b1)
+sim.sendMessage("B", b2)
+sim.sendMessage("B", b3)
+sim.sendMessage("B", b4)
+sim.sendMessage("B", b5)
+sim.sendMessage("B", b6)
+sim.sendMessage("B", b7)
+sim.sendMessage("B", b8)
+
+sim.unbind(c1, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+sim.sendMessage("A", b4)
+sim.sendMessage("A", b5)
+sim.sendMessage("A", b6)
+sim.sendMessage("A", b7)
+sim.sendMessage("A", b8)
+
+sim.unbind(c4, "A")
+sim.unbind(c3, "B")
+sim.unbind(c8, "A")
+
+sim.dumpState()
+sim.end()
diff --git a/python/models/fedsim/testRing.py b/python/models/fedsim/testRing.py
new file mode 100644
index 0000000000..c883b54993
--- /dev/null
+++ b/python/models/fedsim/testRing.py
@@ -0,0 +1,48 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from fedsim import Sim
+
+sim = Sim()
+b1 = sim.addBroker("B1")
+b2 = sim.addBroker("B2")
+b3 = sim.addBroker("B3")
+
+sim.link(b1, b2, False)
+sim.link(b2, b3, False)
+sim.link(b3, b1, False)
+
+c1 = sim.addClient("C1", b1)
+c2 = sim.addClient("C2", b2)
+c3 = sim.addClient("C3", b3)
+
+sim.bind(c1, "A")
+sim.bind(c2, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+
+sim.unbind(c2, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+
+sim.end()
diff --git a/python/models/fedsim/testStar.py b/python/models/fedsim/testStar.py
new file mode 100644
index 0000000000..e6b801446f
--- /dev/null
+++ b/python/models/fedsim/testStar.py
@@ -0,0 +1,65 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from fedsim import Sim
+
+sim = Sim()
+b1 = sim.addBroker("B1")
+b2 = sim.addBroker("B2")
+b3 = sim.addBroker("B3")
+bc = sim.addBroker("BC")
+
+sim.link(b1, bc)
+sim.link(b2, bc)
+sim.link(b3, bc)
+
+c1 = sim.addClient("C1", b1)
+c2 = sim.addClient("C2", b2)
+c3 = sim.addClient("C3", b3)
+cc = sim.addClient("CC", bc)
+
+sim.bind(c1, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+sim.sendMessage("A", bc)
+
+sim.bind(c2, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+sim.sendMessage("A", bc)
+
+sim.unbind(c1, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+sim.sendMessage("A", bc)
+
+sim.unbind(c2, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", b3)
+sim.sendMessage("A", bc)
+
+sim.end()
diff --git a/python/models/fedsim/testStarAdd.py b/python/models/fedsim/testStarAdd.py
new file mode 100644
index 0000000000..e0eb44952a
--- /dev/null
+++ b/python/models/fedsim/testStarAdd.py
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from fedsim import Sim
+
+sim = Sim()
+b1 = sim.addBroker("B1")
+b2 = sim.addBroker("B2")
+b3 = sim.addBroker("B3")
+bc = sim.addBroker("BC")
+
+sim.link(b1, bc)
+sim.link(b2, bc)
+
+c1 = sim.addClient("C1", b1)
+c2 = sim.addClient("C2", b2)
+c3 = sim.addClient("C3", b3)
+cc = sim.addClient("CC", bc)
+
+sim.bind(c1, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", bc)
+
+sim.bind(c2, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", bc)
+
+sim.bind(c3, "A")
+sim.link(b3, bc)
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", bc)
+
+sim.end()
+
diff --git a/python/pal2py b/python/pal2py
deleted file mode 100755
index 544151bf76..0000000000
--- a/python/pal2py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-import sys, os, xml
-
-from qpid.spec import load, pythonize
-from textwrap import TextWrapper
-from xml.sax.handler import ContentHandler
-
-class Block:
-
- def __init__(self, children):
- self.children = children
-
- def emit(self, out):
- for child in self.children:
- if not hasattr(child, "emit"):
- raise ValueError(child)
- child.emit(out)
-
- if not self.children:
- out.line("pass")
-
-class If:
-
- def __init__(self, expr, cons, alt = None):
- self.expr = expr
- self.cons = cons
- self.alt = alt
-
- def emit(self, out):
- out.line("if ")
- self.expr.emit(out)
- out.write(":")
- out.level += 1
- self.cons.emit(out)
- out.level -= 1
- if self.alt:
- out.line("else:")
- out.level += 1
- self.alt.emit(out)
- out.level -= 1
-
-class Stmt:
-
- def __init__(self, code):
- self.code = code
-
- def emit(self, out):
- out.line(self.code)
-
-class Expr:
-
- def __init__(self, code):
- self.code = code
-
- def emit(self, out):
- out.write(self.code)
-
-class Abort:
-
- def __init__(self, expr):
- self.expr = expr
-
- def emit(self, out):
- out.line("assert False, ")
- self.expr.emit(out)
-
-WRAPPER = TextWrapper()
-
-def wrap(text):
- return WRAPPER.wrap(" ".join(text.split()))
-
-class Doc:
-
- def __init__(self, text):
- self.text = text
-
- def emit(self, out):
- out.line('"""')
- for line in wrap(self.text):
- out.line(line)
- out.line('"""')
-
-class Frame:
-
- def __init__(self, attrs):
- self.attrs = attrs
- self.children = []
- self.text = None
-
- def __getattr__(self, attr):
- return self.attrs[attr]
-
-def isunicode(s):
- if isinstance(s, str):
- return False
- for ch in s:
- if ord(ch) > 127:
- return True
- return False
-
-def string_literal(s):
- if s == None:
- return None
- if isunicode(s):
- return "%r" % s
- else:
- return "%r" % str(s)
-
-TRUTH = {
- "1": True,
- "0": False,
- "true": True,
- "false": False
- }
-
-LITERAL = {
- "shortstr": string_literal,
- "longstr": string_literal,
- "bit": lambda s: TRUTH[s.lower()],
- "longlong": lambda s: "%r" % long(s)
- }
-
-def literal(s, field):
- return LITERAL[field.type](s)
-
-def palexpr(s, field):
- if s.startswith("$"):
- return "msg.%s" % s[1:]
- else:
- return literal(s, field)
-
-class Translator(ContentHandler):
-
- def __init__(self, spec):
- self.spec = spec
- self.stack = []
- self.content = None
- self.root = Frame(None)
- self.push(self.root)
-
- def emit(self, out):
- blk = Block(self.root.children)
- blk.emit(out)
- out.write("\n")
-
- def peek(self):
- return self.stack[-1]
-
- def pop(self):
- return self.stack.pop()
-
- def push(self, frame):
- self.stack.append(frame)
-
- def startElement(self, name, attrs):
- self.push(Frame(attrs))
-
- def endElement(self, name):
- frame = self.pop()
- if hasattr(self, name):
- child = getattr(self, name)(frame)
- else:
- child = self.handle(name, frame)
-
- if child:
- self.peek().children.append(child)
-
- def characters(self, text):
- frame = self.peek()
- if frame.text:
- frame.text += text
- else:
- frame.text = text
-
- def handle(self, name, frame):
- for klass in self.spec.classes:
- pyklass = pythonize(klass.name)
- if name.startswith(pyklass):
- name = name[len(pyklass) + 1:]
- break
- else:
- raise ValueError("unknown class: %s" % name)
-
- for method in klass.methods:
- pymethod = pythonize(method.name)
- if name == pymethod:
- break
- else:
- raise ValueError("unknown method: %s" % name)
-
- args = ["%s = %s" % (key, palexpr(val, method.fields.bypyname[key]))
- for key, val in frame.attrs.items()]
- if method.content and self.content:
- args.append("content = %r" % string_literal(self.content))
- code = "ssn.%s_%s(%s)" % (pyklass, pymethod, ", ".join(args))
- if pymethod == "consume":
- code = "consumer_tag = %s.consumer_tag" % code
- return Stmt(code)
-
- def pal(self, frame):
- return Block([Doc(frame.text)] + frame.children)
-
- def include(self, frame):
- base, ext = os.path.splitext(frame.filename)
- return Stmt("from %s import *" % base)
-
- def session(self, frame):
- return Block([Stmt("cli = open()"), Stmt("ssn = cli.channel(0)"),
- Stmt("ssn.channel_open()")] + frame.children)
-
- def empty(self, frame):
- return If(Expr("msg == None"), Block(frame.children))
-
- def abort(self, frame):
- return Abort(Expr(string_literal(frame.text)))
-
- def wait(self, frame):
- return Stmt("msg = ssn.queue(consumer_tag).get(timeout=%r)" %
- (int(frame.timeout)/1000))
-
- def basic_arrived(self, frame):
- if frame.children:
- return If(Expr("msg != None"), Block(frame.children))
-
- def basic_content(self, frame):
- self.content = frame.text
-
-class Emitter:
-
- def __init__(self, out):
- self.out = out
- self.level = 0
-
- def write(self, code):
- self.out.write(code)
-
- def line(self, code):
- self.write("\n%s%s" % (" "*self.level, code))
-
- def flush(self):
- self.out.flush()
-
- def close(self):
- self.out.close()
-
-
-for f in sys.argv[2:]:
- base, ext = os.path.splitext(f)
- spec = load(sys.argv[1])
- t = Translator(spec)
- xml.sax.parse(f, t)
-# out = Emitter(open("%s.py" % base))
- out = Emitter(sys.stdout)
- t.emit(out)
- out.close()
diff --git a/python/perftest b/python/perftest
deleted file mode 100755
index 2e9148ce50..0000000000
--- a/python/perftest
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-
-def publisher(n):
- import qpid
- import sys
- from qpid.client import Client
- from qpid.content import Content
- if len(sys.argv) >= 3:
- n = int(sys.argv[2])
- client = Client("127.0.0.1", 5672)
- client.start({"LOGIN": "guest", "PASSWORD": "guest"})
- channel = client.channel(1)
- channel.session_open()
- message = Content("message")
- message["routing_key"] = "message_queue"
- print "producing ", n, " messages"
- for i in range(n):
- channel.message_transfer(destination="amq.direct", content=message)
-
- print "producing final message"
- message = Content("That's done")
- message["routing_key"] = "message_queue"
- channel.message_transfer(destination="amq.direct", content=message)
-
- print "consuming sync message"
- consumer = "consumer"
- queue = client.queue(consumer)
- channel.message_subscribe(queue="sync_queue", destination=consumer)
- channel.message_flow(consumer, 0, 0xFFFFFFFF)
- channel.message_flow(consumer, 1, 0xFFFFFFFF)
- queue.get(block = True)
- print "done"
- channel.session_close()
-
-def consumer():
- import sys
- import qpid
- from qpid.client import Client
- from qpid.content import Content
- client = Client("127.0.0.1", 5672)
- client.start({"LOGIN": "guest", "PASSWORD": "guest"})
- channel = client.channel(1)
- channel.session_open()
- consumer = "consumer"
- queue = client.queue(consumer)
- channel.message_subscribe(queue="message_queue", destination=consumer)
- channel.message_flow(consumer, 0, 0xFFFFFFFF)
- channel.message_flow(consumer, 1, 0xFFFFFFFF)
- final = "That's done"
- content = ""
- message = None
- print "getting messages"
- while content != final:
- message = queue.get(block = True)
- content = message.content.body
- message.complete(cumulative=True)
-
- print "consumed all messages"
- message = Content("message")
- message["routing_key"] = "sync_queue"
- channel.message_transfer(destination="amq.direct", content=message)
- print "done"
- channel.session_close()
-
-if __name__=='__main__':
- import sys
- import qpid
- from timeit import Timer
- from qpid.client import Client
- from qpid.content import Content
- client = Client("127.0.0.1", 5672)
- client.start({"LOGIN": "guest", "PASSWORD": "guest"})
- channel = client.channel(1)
- channel.session_open()
- channel.queue_declare(queue="message_queue")
- channel.queue_bind(exchange="amq.direct", queue="message_queue", routing_key="message_queue")
- channel.queue_declare(queue="sync_queue")
- channel.queue_bind(exchange="amq.direct", queue="sync_queue", routing_key="sync_queue")
- channel.session_close()
-
- numMess = 100
- if len(sys.argv) >= 3:
- numMess = int(sys.argv[2])
- if len(sys.argv) == 1:
- print "error: please specify prod or cons"
- elif sys.argv[1] == 'prod':
- tprod = Timer("publisher(100)", "from __main__ import publisher")
- tp = tprod.timeit(1)
- print "produced and consumed" , numMess + 2 ,"messages in: ", tp
- elif sys.argv[1] == 'cons':
- tcons = Timer("consumer()", "from __main__ import consumer")
- tc = tcons.timeit(1)
- print "consumed " , numMess ," in: ", tc
- else:
- print "please specify prod or cons"
diff --git a/python/preppy b/python/preppy
new file mode 100755
index 0000000000..22893dad03
--- /dev/null
+++ b/python/preppy
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os, re, sys
+
+ann = re.compile(r"([ \t]*)@([_a-zA-Z][_a-zA-Z0-9]*)([ \t\n\r]+def[ \t]+)([_a-zA-Z][_a-zA-Z0-9]*)")
+line = re.compile(r"\n([ \t]*)[^ \t\n#]+")
+
+if len(sys.argv) == 2:
+ major, minor = [int(p) for p in sys.argv[1].split(".")]
+elif len(sys.argv) == 1:
+ major, minor = sys.version_info[0:2]
+else:
+ print "usage: %s [ version ] < input.py > output.py" % sys.argv[0]
+ sys.exit(-1)
+
+if major <= 2 and minor <= 3:
+ def process(input):
+ output = ""
+ pos = 0
+ while True:
+ m = ann.search(input, pos)
+ if m:
+ indent, decorator, idef, function = m.groups()
+ output += input[pos:m.start()]
+ output += "%s#@%s%s%s" % (indent, decorator, idef, function)
+ pos = m.end()
+
+ subst = "\n%s%s = %s(%s)\n" % (indent, function, decorator, function)
+ npos = pos
+ while True:
+ n = line.search(input, npos)
+ if not n:
+ input += subst
+ break
+ if len(n.group(1)) <= len(indent):
+ idx = n.start()
+ input = input[:idx] + subst + input[idx:]
+ break
+ npos = n.end()
+ else:
+ break
+
+ output += input[pos:]
+ return output
+else:
+ def process(input):
+ return input
+
+sys.stdout.write(process(sys.stdin.read()))
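
preppy is a stdin-to-stdout preprocessor: on Python 2.3 and earlier, where decorator syntax is not available, it comments out each @decorator line and appends an explicit rebinding after the decorated function's body; on newer interpreters the input passes through unchanged. A hypothetical run of "preppy 2.3 < module.py > module_py23.py" (class and decorator names made up) on:

    class Widget:
      @synchronized
      def method(self):
        return 42

would produce:

    class Widget:
      #@synchronized
      def method(self):
        return 42

      method = synchronized(method)
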
diff --git a/python/qmf/__init__.py b/python/qmf/__init__.py
new file mode 100644
index 0000000000..31d5a2ef58
--- /dev/null
+++ b/python/qmf/__init__.py
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/python/qmf/console.py b/python/qmf/console.py
new file mode 100644
index 0000000000..5348904097
--- /dev/null
+++ b/python/qmf/console.py
@@ -0,0 +1,1970 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+""" Console API for Qpid Management Framework """
+
+import os
+import platform
+import qpid
+import struct
+import socket
+import re
+from qpid.datatypes import UUID
+from qpid.datatypes import timestamp
+from qpid.datatypes import datetime
+from qpid.peer import Closed
+from qpid.session import SessionDetached
+from qpid.connection import Connection, ConnectionFailed, Timeout
+from qpid.datatypes import Message, RangedSet, UUID
+from qpid.util import connect, ssl, URL
+from qpid.codec010 import StringCodec as Codec
+from threading import Lock, Condition, Thread
+from time import time, strftime, gmtime
+from cStringIO import StringIO
+
+#import qpid.log
+#qpid.log.enable(name="qpid.io.cmd", level=qpid.log.DEBUG)
+
+class Console:
+ """ To access the asynchronous operations, a class must be derived from
+ Console with overrides of any combination of the available methods. """
+
+ def brokerConnected(self, broker):
+ """ Invoked when a connection is established to a broker """
+ pass
+
+ def brokerDisconnected(self, broker):
+ """ Invoked when the connection to a broker is lost """
+ pass
+
+ def newPackage(self, name):
+ """ Invoked when a QMF package is discovered. """
+ pass
+
+ def newClass(self, kind, classKey):
+ """ Invoked when a new class is discovered. Session.getSchema can be
+ used to obtain details about the class."""
+ pass
+
+ def newAgent(self, agent):
+ """ Invoked when a QMF agent is discovered. """
+ pass
+
+ def delAgent(self, agent):
+ """ Invoked when a QMF agent disconects. """
+ pass
+
+ def objectProps(self, broker, record):
+ """ Invoked when an object is updated. """
+ pass
+
+ def objectStats(self, broker, record):
+ """ Invoked when an object is updated. """
+ pass
+
+ def event(self, broker, event):
+ """ Invoked when an event is raised. """
+ pass
+
+ def heartbeat(self, agent, timestamp):
+ """ Invoked when an agent heartbeat is received. """
+ pass
+
+ def brokerInfo(self, broker):
+ """ Invoked when the connection sequence reaches the point where broker information is available. """
+ pass
+
+ def methodResponse(self, broker, seq, response):
+ """ Invoked when a method response from an asynchronous method call is received. """
+ pass
+
+class BrokerURL(URL):
+ def __init__(self, text):
+ URL.__init__(self, text)
+ if self.port is None:
+ if self.scheme == URL.AMQPS:
+ self.port = 5671
+ else:
+ self.port = 5672
+ self.authName = None
+ self.authPass = None
+ if self.user:
+ self.authName = str(self.user)
+ if self.password:
+ self.authPass = str(self.password)
+
+ def name(self):
+ return self.host + ":" + str(self.port)
+
+ def match(self, host, port):
+ return socket.getaddrinfo(self.host, self.port)[0][4] == socket.getaddrinfo(host, port)[0][4]
+
+class Object(object):
+ """ This class defines a 'proxy' object representing a real managed object on an agent.
+      Actions taken on this proxy are applied remotely to the real managed object.
+ """
+ def __init__(self, session, broker, schema, codec, prop, stat, managed=True, kwargs={}):
+ self._session = session
+ self._broker = broker
+ self._schema = schema
+ self._managed = managed
+ if self._managed:
+ self._currentTime = codec.read_uint64()
+ self._createTime = codec.read_uint64()
+ self._deleteTime = codec.read_uint64()
+ self._objectId = ObjectId(codec)
+ else:
+ self._currentTime = None
+ self._createTime = None
+ self._deleteTime = None
+ self._objectId = None
+ self._properties = []
+ self._statistics = []
+ if codec:
+ if prop:
+ notPresent = self._parsePresenceMasks(codec, schema)
+ for property in schema.getProperties():
+ if property.name in notPresent:
+ self._properties.append((property, None))
+ else:
+ self._properties.append((property, self._session._decodeValue(codec, property.type, broker)))
+ if stat:
+ for statistic in schema.getStatistics():
+ self._statistics.append((statistic, self._session._decodeValue(codec, statistic.type, broker)))
+ else:
+ for property in schema.getProperties():
+ if property.optional:
+ self._properties.append((property, None))
+ else:
+ self._properties.append((property, self._session._defaultValue(property, broker, kwargs)))
+ for statistic in schema.getStatistics():
+ self._statistics.append((statistic, self._session._defaultValue(statistic, broker, kwargs)))
+
+ def getBroker(self):
+ """ Return the broker from which this object was sent """
+ return self._broker
+
+ def getObjectId(self):
+ """ Return the object identifier for this object """
+ return self._objectId
+
+ def getClassKey(self):
+ """ Return the class-key that references the schema describing this object. """
+ return self._schema.getKey()
+
+ def getSchema(self):
+ """ Return the schema that describes this object. """
+ return self._schema
+
+ def getMethods(self):
+ """ Return a list of methods available for this object. """
+ return self._schema.getMethods()
+
+ def getTimestamps(self):
+ """ Return the current, creation, and deletion times for this object. """
+ return self._currentTime, self._createTime, self._deleteTime
+
+ def isDeleted(self):
+ """ Return True iff this object has been deleted. """
+ return self._deleteTime != 0
+
+ def isManaged(self):
+ """ Return True iff this object is a proxy for a managed object on an agent. """
+ return self._managed
+
+ def getIndex(self):
+ """ Return a string describing this object's primary key. """
+ result = u""
+ for property, value in self._properties:
+ if property.index:
+ if result != u"":
+ result += u":"
+ try:
+ valstr = unicode(self._session._displayValue(value, property.type))
+ except:
+ valstr = u"<undecodable>"
+ result += valstr
+ return result
+
+ def getProperties(self):
+ """ Return a list of object properties """
+ return self._properties
+
+ def getStatistics(self):
+ """ Return a list of object statistics """
+ return self._statistics
+
+ def mergeUpdate(self, newer):
+ """ Replace properties and/or statistics with a newly received update """
+ if not self.isManaged():
+ raise Exception("Object is not managed")
+ if self._objectId != newer._objectId:
+ raise Exception("Objects with different object-ids")
+ if len(newer.getProperties()) > 0:
+ self._properties = newer.getProperties()
+ if len(newer.getStatistics()) > 0:
+ self._statistics = newer.getStatistics()
+
+ def update(self):
+ """ Contact the agent and retrieve the lastest property and statistic values for this object. """
+ if not self.isManaged():
+ raise Exception("Object is not managed")
+ obj = self._session.getObjects(_objectId = self._objectId, _broker=self._broker)
+ if obj:
+ self.mergeUpdate(obj[0])
+ else:
+ raise Exception("Underlying object no longer exists")
+
+ def __repr__(self):
+ if self.isManaged():
+ id = self.getObjectId().__repr__()
+ else:
+ id = "unmanaged"
+ key = self.getClassKey()
+ return key.getPackageName() + ":" + key.getClassName() +\
+ "[" + id + "] " + self.getIndex().encode("utf8")
+
+ def __getattr__(self, name):
+ for method in self._schema.getMethods():
+ if name == method.name:
+ return lambda *args, **kwargs : self._invoke(name, args, kwargs)
+ for property, value in self._properties:
+ if name == property.name:
+ return value
+ if name == "_" + property.name + "_" and property.type == 10: # Dereference references
+ deref = self._session.getObjects(_objectId=value, _broker=self._broker)
+ if len(deref) != 1:
+ return None
+ else:
+ return deref[0]
+ for statistic, value in self._statistics:
+ if name == statistic.name:
+ return value
+ raise Exception("Type Object has no attribute '%s'" % name)
+
+ def __setattr__(self, name, value):
+ if name[0] == '_':
+ super.__setattr__(self, name, value)
+ return
+
+ for prop, unusedValue in self._properties:
+ if name == prop.name:
+ newprop = (prop, value)
+ newlist = []
+ for old, val in self._properties:
+ if name == old.name:
+ newlist.append(newprop)
+ else:
+ newlist.append((old, val))
+ self._properties = newlist
+ return
+ super.__setattr__(self, name, value)
+
+ def _sendMethodRequest(self, name, args, kwargs, synchronous=False, timeWait=None):
+ for method in self._schema.getMethods():
+ if name == method.name:
+ aIdx = 0
+ sendCodec = Codec()
+ seq = self._session.seqMgr._reserve((method, synchronous))
+ self._broker._setHeader(sendCodec, 'M', seq)
+ self._objectId.encode(sendCodec)
+ self._schema.getKey().encode(sendCodec)
+ sendCodec.write_str8(name)
+
+ count = 0
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ count += 1
+ if count != len(args):
+ raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(args)))
+
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ self._session._encodeValue(sendCodec, args[aIdx], arg.type)
+ aIdx += 1
+ if timeWait:
+ ttl = timeWait * 1000
+ else:
+ ttl = None
+ smsg = self._broker._message(sendCodec.encoded, "agent.%d.%d" %
+ (self._objectId.getBrokerBank(), self._objectId.getAgentBank()),
+ ttl=ttl)
+ if synchronous:
+ try:
+ self._broker.cv.acquire()
+ self._broker.syncInFlight = True
+ finally:
+ self._broker.cv.release()
+ self._broker._send(smsg)
+ return seq
+ return None
+
+ def _invoke(self, name, args, kwargs):
+ if not self.isManaged():
+ raise Exception("Object is not managed")
+ if "_timeout" in kwargs:
+ timeout = kwargs["_timeout"]
+ else:
+ timeout = self._broker.SYNC_TIME
+
+ if "_async" in kwargs and kwargs["_async"]:
+ sync = False
+ if "_timeout" not in kwargs:
+ timeout = None
+ else:
+ sync = True
+
+ seq = self._sendMethodRequest(name, args, kwargs, sync, timeout)
+ if seq:
+ if not sync:
+ return seq
+ try:
+ self._broker.cv.acquire()
+ starttime = time()
+ while self._broker.syncInFlight and self._broker.error == None:
+ self._broker.cv.wait(timeout)
+ if time() - starttime > timeout:
+ self._session.seqMgr._release(seq)
+ raise RuntimeError("Timed out waiting for method to respond")
+ finally:
+ self._broker.cv.release()
+ if self._broker.error != None:
+ errorText = self._broker.error
+ self._broker.error = None
+ raise Exception(errorText)
+ return self._broker.syncResult
+ raise Exception("Invalid Method (software defect) [%s]" % name)
+
+ def _encodeUnmanaged(self, codec):
+
+ codec.write_uint8(20)
+ codec.write_str8(self._schema.getKey().getPackageName())
+ codec.write_str8(self._schema.getKey().getClassName())
+ codec.write_bin128(self._schema.getKey().getHash())
+
+ # emit presence masks for optional properties
+ mask = 0
+ bit = 0
+ for prop, value in self._properties:
+ if prop.optional:
+ if bit == 0:
+ bit = 1
+ if value:
+ mask |= bit
+ bit = bit << 1
+ if bit == 256:
+ bit = 0
+ codec.write_uint8(mask)
+ mask = 0
+ if bit != 0:
+ codec.write_uint8(mask)
+
+ # encode properties
+ for prop, value in self._properties:
+ if value != None:
+ self._session._encodeValue(codec, value, prop.type)
+
+ # encode statistics
+ for stat, value in self._statistics:
+ self._session._encodeValue(codec, value, stat.type)
+
+ def _parsePresenceMasks(self, codec, schema):
+ excludeList = []
+ bit = 0
+ for property in schema.getProperties():
+ if property.optional:
+ if bit == 0:
+ mask = codec.read_uint8()
+ bit = 1
+ if (mask & bit) == 0:
+ excludeList.append(property.name)
+ bit *= 2
+ if bit == 256:
+ bit = 0
+ return excludeList
+
+class Session:
+ """
+ An instance of the Session class represents a console session running
+ against one or more QMF brokers. A single instance of Session is needed
+ to interact with the management framework as a console.
+ """
+ _CONTEXT_SYNC = 1
+ _CONTEXT_STARTUP = 2
+ _CONTEXT_MULTIGET = 3
+
+ DEFAULT_GET_WAIT_TIME = 60
+
+ ENCODINGS = {
+ str: 7,
+ timestamp: 8,
+ datetime: 8,
+ int: 9,
+ long: 9,
+ float: 13,
+ UUID: 14,
+ Object: 20,
+ list: 21
+ }
+
+ def __init__(self, console=None, rcvObjects=True, rcvEvents=True, rcvHeartbeats=True,
+ manageConnections=False, userBindings=False):
+ """
+ Initialize a session. If the console argument is provided, the
+ more advanced asynchronous features are available. If console is
+ defaulted, the session will operate in a simpler, synchronous manner.
+
+ The rcvObjects, rcvEvents, and rcvHeartbeats arguments are meaningful only if 'console'
+ is provided. They control whether object updates, events, and agent-heartbeats are
+ subscribed to. If the console is not interested in receiving one or more of the above,
+    setting the argument to False will reduce the bandwidth used by the API.
+
+ If manageConnections is set to True, the Session object will manage connections to
+ the brokers. This means that if a broker is unreachable, it will retry until a connection
+ can be established. If a connection is lost, the Session will attempt to reconnect.
+
+    If manageConnections is set to False, the user is responsible for handling failures.  In
+ this case, an unreachable broker will cause addBroker to raise an exception.
+
+ If userBindings is set to False (the default) and rcvObjects is True, the console will
+ receive data for all object classes. If userBindings is set to True, the user must select
+ which classes the console shall receive by invoking the bindPackage or bindClass methods.
+    This allows the console to be configured to receive only information that is relevant to
+    a particular application.  If rcvObjects is False, userBindings has no meaning.
+ """
+ self.console = console
+ self.brokers = []
+ self.packages = {}
+ self.seqMgr = SequenceManager()
+ self.cv = Condition()
+ self.syncSequenceList = []
+ self.getResult = []
+ self.getSelect = []
+ self.error = None
+ self.rcvObjects = rcvObjects
+ self.rcvEvents = rcvEvents
+ self.rcvHeartbeats = rcvHeartbeats
+ self.userBindings = userBindings
+ if self.console == None:
+ self.rcvObjects = False
+ self.rcvEvents = False
+ self.rcvHeartbeats = False
+ self.bindingKeyList = self._bindingKeys()
+ self.manageConnections = manageConnections
+
+ if self.userBindings and not self.rcvObjects:
+ raise Exception("userBindings can't be set unless rcvObjects is set and a console is provided")
+
+ def __repr__(self):
+ return "QMF Console Session Manager (brokers: %d)" % len(self.brokers)
+
+ def addBroker(self, target="localhost", timeout=None, mechanisms=None):
+ """ Connect to a Qpid broker. Returns an object of type Broker. """
+ url = BrokerURL(target)
+ broker = Broker(self, url.host, url.port, mechanisms, url.authName, url.authPass,
+ ssl = url.scheme == URL.AMQPS, connTimeout=timeout)
+
+ self.brokers.append(broker)
+ if not self.manageConnections:
+ self.getObjects(broker=broker, _class="agent")
+ return broker
+
+ def delBroker(self, broker):
+ """ Disconnect from a broker. The 'broker' argument is the object
+ returned from the addBroker call """
+ if self.console:
+ for agent in broker.getAgents():
+ self.console.delAgent(agent)
+ broker._shutdown()
+ self.brokers.remove(broker)
+ del broker
+
+ def getPackages(self):
+ """ Get the list of known QMF packages """
+ for broker in self.brokers:
+ broker._waitForStable()
+ list = []
+ for package in self.packages:
+ list.append(package)
+ return list
+
+ def getClasses(self, packageName):
+ """ Get the list of known classes within a QMF package """
+ for broker in self.brokers:
+ broker._waitForStable()
+ list = []
+ if packageName in self.packages:
+ for pkey in self.packages[packageName]:
+ list.append(self.packages[packageName][pkey].getKey())
+ return list
+
+ def getSchema(self, classKey):
+ """ Get the schema for a QMF class """
+ for broker in self.brokers:
+ broker._waitForStable()
+ pname = classKey.getPackageName()
+ pkey = classKey.getPackageKey()
+ if pname in self.packages:
+ if pkey in self.packages[pname]:
+ return self.packages[pname][pkey]
+
+ def bindPackage(self, packageName):
+ """ Request object updates for all table classes within a package. """
+ if not self.userBindings or not self.rcvObjects:
+ raise Exception("userBindings option not set for Session")
+ key = "console.obj.*.*.%s.#" % packageName
+ self.bindingKeyList.append(key)
+ for broker in self.brokers:
+ if broker.isConnected():
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName,
+ binding_key=key)
+
+ def bindClass(self, pname, cname):
+ """ Request object updates for a particular table class by package and class name. """
+ if not self.userBindings or not self.rcvObjects:
+ raise Exception("userBindings option not set for Session")
+ key = "console.obj.*.*.%s.%s.#" % (pname, cname)
+ self.bindingKeyList.append(key)
+ for broker in self.brokers:
+ if broker.isConnected():
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName,
+ binding_key=key)
+
+ def bindClassKey(self, classKey):
+ """ Request object updates for a particular table class by class key. """
+ pname = classKey.getPackageName()
+ cname = classKey.getClassName()
+ self.bindClass(pname, cname)
+
+ def getAgents(self, broker=None):
+ """ Get a list of currently known agents """
+ brokerList = []
+ if broker == None:
+ for b in self.brokers:
+ brokerList.append(b)
+ else:
+ brokerList.append(broker)
+
+ for b in brokerList:
+ b._waitForStable()
+ agentList = []
+ for b in brokerList:
+ for a in b.getAgents():
+ agentList.append(a)
+ return agentList
+
+ def makeObject(self, classKey, broker=None, **kwargs):
+ """ Create a new, unmanaged object of the schema indicated by classKey """
+ schema = self.getSchema(classKey)
+ if schema == None:
+ raise Exception("Schema not found for classKey")
+ return Object(self, broker, schema, None, True, True, False, kwargs)
+
+ def getObjects(self, **kwargs):
+ """ Get a list of objects from QMF agents.
+    All arguments are passed by name (keyword).
+
+ The class for queried objects may be specified in one of the following ways:
+
+ _schema = <schema> - supply a schema object returned from getSchema.
+ _key = <key> - supply a classKey from the list returned by getClasses.
+ _class = <name> - supply a class name as a string. If the class name exists
+ in multiple packages, a _package argument may also be supplied.
+ _objectId = <id> - get the object referenced by the object-id
+
+ If objects should be obtained from only one agent, use the following argument.
+ Otherwise, the query will go to all agents.
+
+ _agent = <agent> - supply an agent from the list returned by getAgents.
+
+ If the get query is to be restricted to one broker (as opposed to all connected brokers),
+ add the following argument:
+
+ _broker = <broker> - supply a broker as returned by addBroker.
+
+ The default timeout for this synchronous operation is 60 seconds. To change the timeout,
+ use the following argument:
+
+ _timeout = <time in seconds>
+
+ If additional arguments are supplied, they are used as property selectors. For example,
+ if the argument name="test" is supplied, only objects whose "name" property is "test"
+ will be returned in the result.
+ """
+ if "_broker" in kwargs:
+ brokerList = []
+ brokerList.append(kwargs["_broker"])
+ else:
+ brokerList = self.brokers
+ for broker in brokerList:
+ broker._waitForStable()
+ if broker.isConnected():
+ if "_package" not in kwargs or "_class" not in kwargs or \
+ kwargs["_package"] != "org.apache.qpid.broker" or \
+ kwargs["_class"] != "agent":
+ self.getObjects(_package = "org.apache.qpid.broker", _class = "agent",
+ _agent = broker.getAgent(1,0))
+
+ agentList = []
+ if "_agent" in kwargs:
+ agent = kwargs["_agent"]
+ if agent.broker not in brokerList:
+ raise Exception("Supplied agent is not accessible through the supplied broker")
+ if agent.broker.isConnected():
+ agentList.append(agent)
+ else:
+ if "_objectId" in kwargs:
+ oid = kwargs["_objectId"]
+ for broker in brokerList:
+ for agent in broker.getAgents():
+ if agent.getBrokerBank() == oid.getBrokerBank() and agent.getAgentBank() == oid.getAgentBank():
+ agentList.append(agent)
+ else:
+ for broker in brokerList:
+ for agent in broker.getAgents():
+ if agent.broker.isConnected():
+ agentList.append(agent)
+
+ if len(agentList) == 0:
+ return []
+
+ pname = None
+ cname = None
+ hash = None
+ classKey = None
+ if "_schema" in kwargs: classKey = kwargs["_schema"].getKey()
+ elif "_key" in kwargs: classKey = kwargs["_key"]
+ elif "_class" in kwargs:
+ cname = kwargs["_class"]
+ if "_package" in kwargs:
+ pname = kwargs["_package"]
+ if cname == None and classKey == None and "_objectId" not in kwargs:
+ raise Exception("No class supplied, use '_schema', '_key', '_class', or '_objectId' argument")
+
+ map = {}
+ self.getSelect = []
+ if "_objectId" in kwargs:
+ map["_objectid"] = kwargs["_objectId"].__repr__()
+ else:
+ if cname == None:
+ cname = classKey.getClassName()
+ pname = classKey.getPackageName()
+ hash = classKey.getHash()
+ map["_class"] = cname
+ if pname != None: map["_package"] = pname
+ if hash != None: map["_hash"] = hash
+ for item in kwargs:
+ if item[0] != '_':
+ self.getSelect.append((item, kwargs[item]))
+
+ self.getResult = []
+ for agent in agentList:
+ broker = agent.broker
+ sendCodec = Codec()
+ try:
+ self.cv.acquire()
+ seq = self.seqMgr._reserve(self._CONTEXT_MULTIGET)
+ self.syncSequenceList.append(seq)
+ finally:
+ self.cv.release()
+ broker._setHeader(sendCodec, 'G', seq)
+ sendCodec.write_map(map)
+ smsg = broker._message(sendCodec.encoded, "agent.%d.%d" % (agent.brokerBank, agent.agentBank))
+ broker._send(smsg)
+
+ starttime = time()
+ timeout = False
+ if "_timeout" in kwargs:
+ waitTime = kwargs["_timeout"]
+ else:
+ waitTime = self.DEFAULT_GET_WAIT_TIME
+ try:
+ self.cv.acquire()
+ while len(self.syncSequenceList) > 0 and self.error == None:
+ self.cv.wait(waitTime)
+ if time() - starttime > waitTime:
+ for pendingSeq in self.syncSequenceList:
+ self.seqMgr._release(pendingSeq)
+ self.syncSequenceList = []
+ timeout = True
+ finally:
+ self.cv.release()
+
+ if self.error:
+ errorText = self.error
+ self.error = None
+ raise Exception(errorText)
+
+ if len(self.getResult) == 0 and timeout:
+ raise RuntimeError("No agent responded within timeout period")
+ return self.getResult
+
+ def setEventFilter(self, **kwargs):
+ """ """
+ pass
+
+ def _bindingKeys(self):
+ keyList = []
+ keyList.append("schema.#")
+ if self.rcvObjects and self.rcvEvents and self.rcvHeartbeats and not self.userBindings:
+ keyList.append("console.#")
+ else:
+ if self.rcvObjects and not self.userBindings:
+ keyList.append("console.obj.#")
+ else:
+ keyList.append("console.obj.*.*.org.apache.qpid.broker.agent")
+ if self.rcvEvents:
+ keyList.append("console.event.#")
+ if self.rcvHeartbeats:
+ keyList.append("console.heartbeat.#")
+ return keyList
+
+ def _handleBrokerConnect(self, broker):
+ if self.console:
+ for agent in broker.getAgents():
+ self.console.newAgent(agent)
+ self.console.brokerConnected(broker)
+
+ def _handleBrokerDisconnect(self, broker):
+ if self.console:
+ for agent in broker.getAgents():
+ self.console.delAgent(agent)
+ self.console.brokerDisconnected(broker)
+
+ def _handleBrokerResp(self, broker, codec, seq):
+ broker.brokerId = codec.read_uuid()
+ if self.console != None:
+ self.console.brokerInfo(broker)
+
+ # Send a package request
+ # (effectively inc and dec outstanding by not doing anything)
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'P', seq)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+ def _handlePackageInd(self, broker, codec, seq):
+ pname = str(codec.read_str8())
+ notify = False
+ try:
+ self.cv.acquire()
+ if pname not in self.packages:
+ self.packages[pname] = {}
+ notify = True
+ finally:
+ self.cv.release()
+ if notify and self.console != None:
+ self.console.newPackage(pname)
+
+ # Send a class request
+ broker._incOutstanding()
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'Q', seq)
+ sendCodec.write_str8(pname)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+ def _handleCommandComplete(self, broker, codec, seq):
+ code = codec.read_uint32()
+ text = codec.read_str8()
+ context = self.seqMgr._release(seq)
+ if context == self._CONTEXT_STARTUP:
+ broker._decOutstanding()
+ elif context == self._CONTEXT_SYNC and seq == broker.syncSequence:
+ try:
+ broker.cv.acquire()
+ broker.syncInFlight = False
+ broker.cv.notify()
+ finally:
+ broker.cv.release()
+ elif context == self._CONTEXT_MULTIGET and seq in self.syncSequenceList:
+ try:
+ self.cv.acquire()
+ self.syncSequenceList.remove(seq)
+ if len(self.syncSequenceList) == 0:
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def _handleClassInd(self, broker, codec, seq):
+ kind = codec.read_uint8()
+ classKey = ClassKey(codec)
+ unknown = False
+
+ try:
+ self.cv.acquire()
+ if classKey.getPackageName() in self.packages:
+ if classKey.getPackageKey() not in self.packages[classKey.getPackageName()]:
+ unknown = True
+ finally:
+ self.cv.release()
+
+ if unknown:
+ # Send a schema request for the unknown class
+ broker._incOutstanding()
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'S', seq)
+ classKey.encode(sendCodec)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+ def _handleMethodResp(self, broker, codec, seq):
+ code = codec.read_uint32()
+ text = codec.read_str16()
+ outArgs = {}
+ pair = self.seqMgr._release(seq)
+ if pair == None:
+ return
+ method, synchronous = pair
+ if code == 0:
+ for arg in method.arguments:
+ if arg.dir.find("O") != -1:
+ outArgs[arg.name] = self._decodeValue(codec, arg.type, broker)
+ result = MethodResult(code, text, outArgs)
+ if synchronous:
+ try:
+ broker.cv.acquire()
+ broker.syncResult = result
+ broker.syncInFlight = False
+ broker.cv.notify()
+ finally:
+ broker.cv.release()
+ else:
+ if self.console:
+ self.console.methodResponse(broker, seq, result)
+
+ def _handleHeartbeatInd(self, broker, codec, seq, msg):
+ brokerBank = 1
+ agentBank = 0
+ dp = msg.get("delivery_properties")
+ if dp:
+ key = dp["routing_key"]
+ keyElements = key.split(".")
+ if len(keyElements) == 4:
+ brokerBank = int(keyElements[2])
+ agentBank = int(keyElements[3])
+
+ agent = broker.getAgent(brokerBank, agentBank)
+ timestamp = codec.read_uint64()
+ if self.console != None and agent != None:
+ self.console.heartbeat(agent, timestamp)
+
+ def _handleEventInd(self, broker, codec, seq):
+ if self.console != None:
+ event = Event(self, broker, codec)
+ self.console.event(broker, event)
+
+ def _handleSchemaResp(self, broker, codec, seq):
+ kind = codec.read_uint8()
+ classKey = ClassKey(codec)
+ _class = SchemaClass(kind, classKey, codec, self)
+ try:
+ self.cv.acquire()
+ self.packages[classKey.getPackageName()][classKey.getPackageKey()] = _class
+ finally:
+ self.cv.release()
+
+ self.seqMgr._release(seq)
+ broker._decOutstanding()
+ if self.console != None:
+ self.console.newClass(kind, classKey)
+
+ def _handleContentInd(self, broker, codec, seq, prop=False, stat=False):
+ classKey = ClassKey(codec)
+ try:
+ self.cv.acquire()
+ pname = classKey.getPackageName()
+ if pname not in self.packages:
+ return
+ pkey = classKey.getPackageKey()
+ if pkey not in self.packages[pname]:
+ return
+ schema = self.packages[pname][pkey]
+ finally:
+ self.cv.release()
+
+ object = Object(self, broker, schema, codec, prop, stat)
+ if pname == "org.apache.qpid.broker" and classKey.getClassName() == "agent" and prop:
+ broker._updateAgent(object)
+
+ try:
+ self.cv.acquire()
+ if seq in self.syncSequenceList:
+ if object.getTimestamps()[2] == 0 and self._selectMatch(object):
+ self.getResult.append(object)
+ return
+ finally:
+ self.cv.release()
+
+ if self.console and self.rcvObjects:
+ if prop:
+ self.console.objectProps(broker, object)
+ if stat:
+ self.console.objectStats(broker, object)
+
+ def _handleError(self, error):
+ try:
+ self.cv.acquire()
+ if len(self.syncSequenceList) > 0:
+ self.error = error
+ self.syncSequenceList = []
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def _selectMatch(self, object):
+ """ Check the object against self.getSelect to check for a match """
+ for key, value in self.getSelect:
+ for prop, propval in object.getProperties():
+ if key == prop.name and value != propval:
+ return False
+ return True
+
+ def _decodeValue(self, codec, typecode, broker=None):
+ """ Decode, from the codec, a value based on its typecode. """
+ if typecode == 1: data = codec.read_uint8() # U8
+ elif typecode == 2: data = codec.read_uint16() # U16
+ elif typecode == 3: data = codec.read_uint32() # U32
+ elif typecode == 4: data = codec.read_uint64() # U64
+ elif typecode == 6: data = codec.read_str8() # SSTR
+ elif typecode == 7: data = codec.read_str16() # LSTR
+ elif typecode == 8: data = codec.read_int64() # ABSTIME
+ elif typecode == 9: data = codec.read_uint64() # DELTATIME
+ elif typecode == 10: data = ObjectId(codec) # REF
+ elif typecode == 11: data = codec.read_uint8() != 0 # BOOL
+ elif typecode == 12: data = codec.read_float() # FLOAT
+ elif typecode == 13: data = codec.read_double() # DOUBLE
+ elif typecode == 14: data = codec.read_uuid() # UUID
+ elif typecode == 16: data = codec.read_int8() # S8
+ elif typecode == 17: data = codec.read_int16() # S16
+ elif typecode == 18: data = codec.read_int32() # S32
+    elif typecode == 19: data = codec.read_int64()                   # S64
+ elif typecode == 15: data = codec.read_map() # FTABLE
+ elif typecode == 20: # OBJECT
+      # Read the inner type code.  If it is still 20 (OBJECT), decode the
+      # object here; otherwise recurse to decode the inner value.
+ inner_type_code = codec.read_uint8()
+ if inner_type_code == 20:
+ classKey = ClassKey(codec)
+ try:
+ self.cv.acquire()
+ pname = classKey.getPackageName()
+ if pname not in self.packages:
+ return None
+ pkey = classKey.getPackageKey()
+ if pkey not in self.packages[pname]:
+ return None
+ schema = self.packages[pname][pkey]
+ finally:
+ self.cv.release()
+ data = Object(self, broker, schema, codec, True, True, False)
+ else:
+ data = self._decodeValue(codec, inner_type_code, broker)
+ elif typecode == 21: # List
+ #taken from codec10.read_list
+ sc = Codec(codec.read_vbin32())
+ count = sc.read_uint32()
+ data = []
+ while count > 0:
+ type = sc.read_uint8()
+ data.append(self._decodeValue(sc,type,broker))
+ count -= 1
+ elif typecode == 22: #Array
+ #taken from codec10.read_array
+ sc = Codec(codec.read_vbin32())
+ count = sc.read_uint32()
+ type = sc.read_uint8()
+ data = []
+ while count > 0:
+ data.append(self._decodeValue(sc,type,broker))
+ count -= 1
+ else:
+ raise ValueError("Invalid type code: %d" % typecode)
+ return data
+
+ def _encodeValue(self, codec, value, typecode):
+ """ Encode, into the codec, a value based on its typecode. """
+ if typecode == 1: codec.write_uint8 (int(value)) # U8
+ elif typecode == 2: codec.write_uint16 (int(value)) # U16
+ elif typecode == 3: codec.write_uint32 (long(value)) # U32
+ elif typecode == 4: codec.write_uint64 (long(value)) # U64
+ elif typecode == 6: codec.write_str8 (value) # SSTR
+ elif typecode == 7: codec.write_str16 (value) # LSTR
+ elif typecode == 8: codec.write_int64 (long(value)) # ABSTIME
+ elif typecode == 9: codec.write_uint64 (long(value)) # DELTATIME
+ elif typecode == 10: value.encode (codec) # REF
+ elif typecode == 11: codec.write_uint8 (int(value)) # BOOL
+ elif typecode == 12: codec.write_float (float(value)) # FLOAT
+ elif typecode == 13: codec.write_double (float(value)) # DOUBLE
+ elif typecode == 14: codec.write_uuid (value.bytes) # UUID
+ elif typecode == 16: codec.write_int8 (int(value)) # S8
+ elif typecode == 17: codec.write_int16 (int(value)) # S16
+ elif typecode == 18: codec.write_int32 (int(value)) # S32
+ elif typecode == 19: codec.write_int64 (int(value)) # S64
+ elif typecode == 20: value._encodeUnmanaged(codec) # OBJECT
+ elif typecode == 15: codec.write_map (value) # FTABLE
+ elif typecode == 21: # List
+ sc = Codec()
+ self._encodeValue(sc, len(value), 3)
+ for o in value:
+ ltype=self.encoding(o)
+ self._encodeValue(sc,ltype,1)
+ self._encodeValue(sc, o, ltype)
+ codec.write_vbin32(sc.encoded)
+ elif typecode == 22: # Array
+ sc = Codec()
+ self._encodeValue(sc, len(value), 3)
+ if len(value) > 0:
+ ltype = self.encoding(value[0])
+ self._encodeValue(sc,ltype,1)
+ for o in value:
+ self._encodeValue(sc, o, ltype)
+ codec.write_vbin32(sc.encoded)
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+ def encoding(self, value):
+ return self._encoding(value.__class__)
+
+ def _encoding(self, klass):
+ if Session.ENCODINGS.has_key(klass):
+ return self.ENCODINGS[klass]
+ for base in klass.__bases__:
+      result = self._encoding(base)
+ if result != None:
+ return result
+
+ def _displayValue(self, value, typecode):
+ """ """
+ if typecode == 1: return unicode(value)
+ elif typecode == 2: return unicode(value)
+ elif typecode == 3: return unicode(value)
+ elif typecode == 4: return unicode(value)
+ elif typecode == 6: return value
+ elif typecode == 7: return value
+ elif typecode == 8: return unicode(strftime("%c", gmtime(value / 1000000000)))
+ elif typecode == 9: return unicode(value)
+ elif typecode == 10: return unicode(value.__repr__())
+ elif typecode == 11:
+ if value: return u"T"
+ else: return u"F"
+ elif typecode == 12: return unicode(value)
+ elif typecode == 13: return unicode(value)
+ elif typecode == 14: return unicode(value.__repr__())
+ elif typecode == 15: return unicode(value.__repr__())
+ elif typecode == 16: return unicode(value)
+ elif typecode == 17: return unicode(value)
+ elif typecode == 18: return unicode(value)
+ elif typecode == 19: return unicode(value)
+ elif typecode == 20: return unicode(value.__repr__())
+ elif typecode == 21: return unicode(value.__repr__())
+ elif typecode == 22: return unicode(value.__repr__())
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+ def _defaultValue(self, stype, broker=None, kwargs={}):
+ """ """
+ typecode = stype.type
+ if typecode == 1: return 0
+ elif typecode == 2: return 0
+ elif typecode == 3: return 0
+ elif typecode == 4: return 0
+ elif typecode == 6: return ""
+ elif typecode == 7: return ""
+ elif typecode == 8: return 0
+ elif typecode == 9: return 0
+ elif typecode == 10: return ObjectId(None)
+ elif typecode == 11: return False
+ elif typecode == 12: return 0.0
+ elif typecode == 13: return 0.0
+ elif typecode == 14: return UUID([0 for i in range(16)])
+ elif typecode == 15: return {}
+ elif typecode == 16: return 0
+ elif typecode == 17: return 0
+ elif typecode == 18: return 0
+ elif typecode == 19: return 0
+ elif typecode == 21: return []
+ elif typecode == 22: return []
+ elif typecode == 20:
+ try:
+ if "classKeys" in kwargs:
+ keyList = kwargs["classKeys"]
+ else:
+ keyList = None
+ classKey = self._bestClassKey(stype.refPackage, stype.refClass, keyList)
+ if classKey:
+ return self.makeObject(classKey, broker, kwargs)
+ except:
+ pass
+ return None
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+ def _bestClassKey(self, pname, cname, preferredList):
+ """ """
+ if pname == None or cname == None:
+ if len(preferredList) == 0:
+ return None
+ return preferredList[0]
+ for p in preferredList:
+ if p.getPackageName() == pname and p.getClassName() == cname:
+ return p
+ clist = self.getClasses(pname)
+ for c in clist:
+ if c.getClassName() == cname:
+ return c
+ return None
+
+ def _sendMethodRequest(self, broker, schemaKey, objectId, name, argList):
+ """ This function can be used to send a method request to an object given only the
+ broker, schemaKey, and objectId. This is an uncommon usage pattern as methods are
+ normally invoked on the object itself.
+ """
+ schema = self.getSchema(schemaKey)
+ for method in schema.getMethods():
+ if name == method.name:
+ aIdx = 0
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve((method, False))
+ broker._setHeader(sendCodec, 'M', seq)
+ objectId.encode(sendCodec)
+ schemaKey.encode(sendCodec)
+ sendCodec.write_str8(name)
+
+ count = 0
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ count += 1
+ if count != len(argList):
+ raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(argList)))
+
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ self._encodeValue(sendCodec, argList[aIdx], arg.type)
+ aIdx += 1
+ smsg = broker._message(sendCodec.encoded, "agent.%d.%d" %
+ (objectId.getBrokerBank(), objectId.getAgentBank()))
+ broker._send(smsg)
+ return seq
+ return None
+
+class Package:
+ """ """
+ def __init__(self, name):
+ self.name = name
+
+class ClassKey:
+ """ A ClassKey uniquely identifies a class from the schema. """
+ def __init__(self, constructor):
+ if type(constructor) == str:
+ # construct from __repr__ string
+ try:
+ self.pname, cls = constructor.split(":")
+ self.cname, hsh = cls.split("(")
+ hsh = hsh.strip(")")
+ hexValues = hsh.split("-")
+ h0 = int(hexValues[0], 16)
+ h1 = int(hexValues[1], 16)
+ h2 = int(hexValues[2], 16)
+ h3 = int(hexValues[3], 16)
+ self.hash = struct.pack("!LLLL", h0, h1, h2, h3)
+ except:
+ raise Exception("Invalid ClassKey format")
+ else:
+ # construct from codec
+ codec = constructor
+ self.pname = str(codec.read_str8())
+ self.cname = str(codec.read_str8())
+ self.hash = codec.read_bin128()
+
+ def encode(self, codec):
+ codec.write_str8(self.pname)
+ codec.write_str8(self.cname)
+ codec.write_bin128(self.hash)
+
+ def getPackageName(self):
+ return self.pname
+
+ def getClassName(self):
+ return self.cname
+
+ def getHash(self):
+ return self.hash
+
+ def getHashString(self):
+ return "%08x-%08x-%08x-%08x" % struct.unpack ("!LLLL", self.hash)
+
+ def getPackageKey(self):
+ return (self.cname, self.hash)
+
+ def __repr__(self):
+ return self.pname + ":" + self.cname + "(" + self.getHashString() + ")"
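+
+# Illustrative usage (the hash value below is made up): a ClassKey can be
+# reconstructed from its __repr__ string, e.g.
+#   key = ClassKey("org.apache.qpid.broker:queue(01234567-89abcdef-01234567-89abcdef)")
+#   key.getPackageName()    # -> "org.apache.qpid.broker"
+#   key.getClassName()      # -> "queue"
+#   key.getHashString()     # -> "01234567-89abcdef-01234567-89abcdef"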
+
+class SchemaClass:
+ """ """
+ CLASS_KIND_TABLE = 1
+ CLASS_KIND_EVENT = 2
+
+ def __init__(self, kind, key, codec, session):
+ self.kind = kind
+ self.classKey = key
+ self.properties = []
+ self.statistics = []
+ self.methods = []
+ self.arguments = []
+ self.session = session
+
+ hasSupertype = 0 #codec.read_uint8()
+ if self.kind == self.CLASS_KIND_TABLE:
+ propCount = codec.read_uint16()
+ statCount = codec.read_uint16()
+ methodCount = codec.read_uint16()
+ if hasSupertype == 1:
+ self.superTypeKey = ClassKey(codec)
+ else:
+        self.superTypeKey = None
+ for idx in range(propCount):
+ self.properties.append(SchemaProperty(codec))
+ for idx in range(statCount):
+ self.statistics.append(SchemaStatistic(codec))
+ for idx in range(methodCount):
+ self.methods.append(SchemaMethod(codec))
+
+ elif self.kind == self.CLASS_KIND_EVENT:
+ argCount = codec.read_uint16()
+ if (hasSupertype):
+ self.superTypeKey = ClassKey(codec)
+ else:
+        self.superTypeKey = None
+ for idx in range(argCount):
+ self.arguments.append(SchemaArgument(codec, methodArg=False))
+
+ def __repr__(self):
+ if self.kind == self.CLASS_KIND_TABLE:
+ kindStr = "Table"
+ elif self.kind == self.CLASS_KIND_EVENT:
+ kindStr = "Event"
+ else:
+ kindStr = "Unsupported"
+ result = "%s Class: %s " % (kindStr, self.classKey.__repr__())
+ return result
+
+ def getKey(self):
+ """ Return the class-key for this class. """
+ return self.classKey
+
+ def getProperties(self):
+ """ Return the list of properties for the class. """
+ if (self.superTypeKey == None):
+ return self.properties
+ else:
+ return self.properties + self.session.getSchema(self.superTypeKey).getProperties()
+
+ def getStatistics(self):
+ """ Return the list of statistics for the class. """
+ if (self.superTypeKey == None):
+ return self.statistics
+ else:
+ return self.statistics + self.session.getSchema(self.superTypeKey).getStatistics()
+
+ def getMethods(self):
+ """ Return the list of methods for the class. """
+ if (self.superTypeKey == None):
+ return self.methods
+ else:
+ return self.methods + self.session.getSchema(self.superTypeKey).getMethods()
+
+ def getArguments(self):
+ """ Return the list of events for the class. """
+ """ Return the list of methods for the class. """
+ if (self.superTypeKey == None):
+ return self.arguments
+ else:
+ return self.arguments + self.session.getSchema(self.superTypeKey).getArguments()
+
+class SchemaProperty:
+ """ """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ self.access = str(map["access"])
+ self.index = map["index"] != 0
+ self.optional = map["optional"] != 0
+ self.refPackage = None
+ self.refClass = None
+ self.unit = None
+ self.min = None
+ self.max = None
+ self.maxlen = None
+ self.desc = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "min" : self.min = value
+ elif key == "max" : self.max = value
+ elif key == "maxlen" : self.maxlen = value
+ elif key == "desc" : self.desc = value
+ elif key == "refPackage" : self.refPackage = value
+ elif key == "refClass" : self.refClass = value
+
+ def __repr__(self):
+ return self.name
+
+class SchemaStatistic:
+ """ """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ self.unit = None
+ self.desc = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "desc" : self.desc = value
+
+ def __repr__(self):
+ return self.name
+
+class SchemaMethod:
+ """ """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ argCount = map["argCount"]
+ if "desc" in map:
+ self.desc = map["desc"]
+ else:
+ self.desc = None
+ self.arguments = []
+
+ for idx in range(argCount):
+ self.arguments.append(SchemaArgument(codec, methodArg=True))
+
+ def __repr__(self):
+ result = self.name + "("
+ first = True
+ for arg in self.arguments:
+ if arg.dir.find("I") != -1:
+ if first:
+ first = False
+ else:
+ result += ", "
+ result += arg.name
+ result += ")"
+ return result
+
+class SchemaArgument:
+ """ """
+ def __init__(self, codec, methodArg):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ if methodArg:
+ self.dir = str(map["dir"]).upper()
+ self.unit = None
+ self.min = None
+ self.max = None
+ self.maxlen = None
+ self.desc = None
+ self.default = None
+ self.refPackage = None
+ self.refClass = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "min" : self.min = value
+ elif key == "max" : self.max = value
+ elif key == "maxlen" : self.maxlen = value
+ elif key == "desc" : self.desc = value
+ elif key == "default" : self.default = value
+ elif key == "refPackage" : self.refPackage = value
+ elif key == "refClass" : self.refClass = value
+
+class ObjectId:
+ """ Object that represents QMF object identifiers """
+ def __init__(self, codec, first=0, second=0):
+ if codec:
+ self.first = codec.read_uint64()
+ self.second = codec.read_uint64()
+ else:
+ self.first = first
+ self.second = second
+
+ def __cmp__(self, other):
+    if other == None or not isinstance(other, ObjectId):
+ return 1
+ if self.first < other.first:
+ return -1
+ if self.first > other.first:
+ return 1
+ if self.second < other.second:
+ return -1
+ if self.second > other.second:
+ return 1
+ return 0
+
+ def __repr__(self):
+ return "%d-%d-%d-%d-%d" % (self.getFlags(), self.getSequence(),
+ self.getBrokerBank(), self.getAgentBank(), self.getObject())
+
+ def index(self):
+ return (self.first, self.second)
+
+ def getFlags(self):
+ return (self.first & 0xF000000000000000) >> 60
+
+ def getSequence(self):
+ return (self.first & 0x0FFF000000000000) >> 48
+
+ def getBrokerBank(self):
+ return (self.first & 0x0000FFFFF0000000) >> 28
+
+ def getAgentBank(self):
+ return self.first & 0x000000000FFFFFFF
+
+ def getObject(self):
+ return self.second
+
+ def isDurable(self):
+ return self.getSequence() == 0
+
+ def encode(self, codec):
+ codec.write_uint64(self.first)
+ codec.write_uint64(self.second)
+
+ def __hash__(self):
+ return (self.first, self.second).__hash__()
+
+  def __eq__(self, other):
+    if isinstance(other, ObjectId):
+      other = (other.first, other.second)
+    return (self.first, self.second) == other
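+
+# Sketch of the identifier layout, as implied by the getters above: the first
+# 64-bit word packs flags (4 bits), sequence (12 bits), broker bank (20 bits)
+# and agent bank (28 bits); the second word is the object number.  For example
+# (values are illustrative):
+#   oid = ObjectId(None, first=(1 << 28) | 5, second=42)
+#   oid.getBrokerBank()   # -> 1
+#   oid.getAgentBank()    # -> 5
+#   oid.getObject()       # -> 42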
+
+class MethodResult(object):
+ """ """
+ def __init__(self, status, text, outArgs):
+ """ """
+ self.status = status
+ self.text = text
+ self.outArgs = outArgs
+
+ def __getattr__(self, name):
+ if name in self.outArgs:
+ return self.outArgs[name]
+
+ def __repr__(self):
+ return "%s (%d) - %s" % (self.text, self.status, self.outArgs)
+
+class ManagedConnection(Thread):
+ """ Thread class for managing a connection. """
+ DELAY_MIN = 1
+ DELAY_MAX = 128
+ DELAY_FACTOR = 2
+
+ def __init__(self, broker):
+ Thread.__init__(self)
+ self.broker = broker
+ self.cv = Condition()
+ self.canceled = False
+
+ def stop(self):
+ """ Tell this thread to stop running and return. """
+ try:
+ self.cv.acquire()
+ self.canceled = True
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def disconnected(self):
+ """ Notify the thread that the connection was lost. """
+ try:
+ self.cv.acquire()
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def run(self):
+ """ Main body of the running thread. """
+ delay = self.DELAY_MIN
+ while True:
+ try:
+ self.broker._tryToConnect()
+ try:
+ self.cv.acquire()
+ while (not self.canceled) and self.broker.connected:
+ self.cv.wait()
+ if self.canceled:
+ return
+ delay = self.DELAY_MIN
+ finally:
+ self.cv.release()
+ except socket.error:
+ if delay < self.DELAY_MAX:
+ delay *= self.DELAY_FACTOR
+ except SessionDetached:
+ if delay < self.DELAY_MAX:
+ delay *= self.DELAY_FACTOR
+ except Closed:
+ if delay < self.DELAY_MAX:
+ delay *= self.DELAY_FACTOR
+
+ try:
+ self.cv.acquire()
+ self.cv.wait(delay)
+ if self.canceled:
+ return
+ finally:
+ self.cv.release()
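+
+# Note (behaviour read from run() above): on repeated connection failures the
+# retry delay doubles from DELAY_MIN up to DELAY_MAX, i.e. 1, 2, 4, ... 128
+# seconds between attempts, and resets to DELAY_MIN after a successful connect.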
+
+class Broker:
+ """ This object represents a connection (or potential connection) to a QMF broker. """
+ SYNC_TIME = 60
+ nextSeq = 1
+
+ def __init__(self, session, host, port, authMechs, authUser, authPass, ssl=False, connTimeout=None):
+ self.session = session
+ self.host = host
+ self.port = port
+ self.mechanisms = authMechs
+ self.ssl = ssl
+ self.connTimeout = connTimeout
+ self.authUser = authUser
+ self.authPass = authPass
+ self.cv = Condition()
+ self.error = None
+ self.brokerId = None
+ self.connected = False
+ self.amqpSessionId = "%s.%d.%d" % (platform.uname()[1], os.getpid(), Broker.nextSeq)
+ Broker.nextSeq += 1
+ if self.session.manageConnections:
+ self.thread = ManagedConnection(self)
+ self.thread.start()
+ else:
+ self.thread = None
+ self._tryToConnect()
+
+ def isConnected(self):
+ """ Return True if there is an active connection to the broker. """
+ return self.connected
+
+ def getError(self):
+ """ Return the last error message seen while trying to connect to the broker. """
+ return self.error
+
+ def getBrokerId(self):
+ """ Get broker's unique identifier (UUID) """
+ return self.brokerId
+
+ def getBrokerBank(self):
+ """ Return the broker-bank value. This is the value that the broker assigns to
+ objects within its control. This value appears as a field in the ObjectId
+ of objects created by agents controlled by this broker. """
+ return 1
+
+ def getAgent(self, brokerBank, agentBank):
+ """ Return the agent object associated with a particular broker and agent bank value."""
+ bankKey = (brokerBank, agentBank)
+ if bankKey in self.agents:
+ return self.agents[bankKey]
+ return None
+
+ def getSessionId(self):
+ """ Get the identifier of the AMQP session to the broker """
+ return self.amqpSessionId
+
+ def getAgents(self):
+ """ Get the list of agents reachable via this broker """
+ return self.agents.values()
+
+ def getAmqpSession(self):
+ """ Get the AMQP session object for this connected broker. """
+ return self.amqpSession
+
+ def getUrl(self):
+ """ """
+ return "%s:%d" % (self.host, self.port)
+
+ def getFullUrl(self, noAuthIfGuestDefault=True):
+ """ """
+ ssl = ""
+ if self.ssl:
+ ssl = "s"
+ auth = "%s/%s@" % (self.authUser, self.authPass)
+ if self.authUser == "" or \
+ (noAuthIfGuestDefault and self.authUser == "guest" and self.authPass == "guest"):
+ auth = ""
+ return "amqp%s://%s%s:%d" % (ssl, auth, self.host, self.port or 5672)
+
+ def __repr__(self):
+ if self.connected:
+ return "Broker connected at: %s" % self.getUrl()
+ else:
+ return "Disconnected Broker"
+
+ def _tryToConnect(self):
+ try:
+ self.agents = {}
+ self.agents[(1,0)] = Agent(self, 0, "BrokerAgent")
+ self.topicBound = False
+ self.syncInFlight = False
+ self.syncRequest = 0
+ self.syncResult = None
+ self.reqsOutstanding = 1
+
+ sock = connect(self.host, self.port)
+ sock.settimeout(5)
+ oldTimeout = sock.gettimeout()
+ sock.settimeout(self.connTimeout)
+ if self.ssl:
+ connSock = ssl(sock)
+ else:
+ connSock = sock
+ self.conn = Connection(connSock, username=self.authUser, password=self.authPass,
+ mechanism = self.mechanisms, host=self.host, service="qpidd")
+ def aborted():
+ raise Timeout("Waiting for connection to be established with broker")
+ oldAborted = self.conn.aborted
+ self.conn.aborted = aborted
+ self.conn.start()
+ sock.settimeout(oldTimeout)
+ self.conn.aborted = oldAborted
+
+ self.replyName = "reply-%s" % self.amqpSessionId
+ self.amqpSession = self.conn.session(self.amqpSessionId)
+ self.amqpSession.auto_sync = True
+ self.amqpSession.queue_declare(queue=self.replyName, exclusive=True, auto_delete=True)
+ self.amqpSession.exchange_bind(exchange="amq.direct",
+ queue=self.replyName, binding_key=self.replyName)
+ self.amqpSession.message_subscribe(queue=self.replyName, destination="rdest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("rdest").listen(self._replyCb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="rdest", flow_mode=1)
+ self.amqpSession.message_flow(destination="rdest", unit=0, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="rdest", unit=1, value=0xFFFFFFFFL)
+
+ self.topicName = "topic-%s" % self.amqpSessionId
+ self.amqpSession.queue_declare(queue=self.topicName, exclusive=True, auto_delete=True)
+ self.amqpSession.message_subscribe(queue=self.topicName, destination="tdest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("tdest").listen(self._replyCb)
+ self.amqpSession.message_set_flow_mode(destination="tdest", flow_mode=1)
+ self.amqpSession.message_flow(destination="tdest", unit=0, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="tdest", unit=1, value=0xFFFFFFFFL)
+
+ self.connected = True
+ self.session._handleBrokerConnect(self)
+
+ codec = Codec()
+ self._setHeader(codec, 'B')
+ msg = self._message(codec.encoded)
+ self._send(msg)
+
+ except socket.error, e:
+ self.error = "Socket Error %s - %s" % (e.__class__.__name__, e)
+ raise
+ except Closed, e:
+ self.error = "Connect Failed %s - %s" % (e.__class__.__name__, e)
+ raise
+ except ConnectionFailed, e:
+ self.error = "Connect Failed %s - %s" % (e.__class__.__name__, e)
+ raise
+
+ def _updateAgent(self, obj):
+ bankKey = (obj.brokerBank, obj.agentBank)
+ if obj._deleteTime == 0:
+ if bankKey not in self.agents:
+ agent = Agent(self, obj.agentBank, obj.label)
+ self.agents[bankKey] = agent
+ if self.session.console != None:
+ self.session.console.newAgent(agent)
+ else:
+ agent = self.agents.pop(bankKey, None)
+ if agent != None and self.session.console != None:
+ self.session.console.delAgent(agent)
+
+ def _setHeader(self, codec, opcode, seq=0):
+ """ Compose the header of a management message. """
+ codec.write_uint8(ord('A'))
+ codec.write_uint8(ord('M'))
+ codec.write_uint8(ord('2'))
+ codec.write_uint8(ord(opcode))
+ codec.write_uint32(seq)
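+    # For example (illustrative), a broker request written with opcode 'B' and
+    # seq=0 is the four octets 'A' 'M' '2' 'B' followed by a 32-bit sequence
+    # number of zero; _checkHeader below reverses this framing for replies.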
+
+ def _checkHeader(self, codec):
+ """ Check the header of a management message and extract the opcode and class. """
+ try:
+ octet = chr(codec.read_uint8())
+ if octet != 'A':
+ return None, None
+ octet = chr(codec.read_uint8())
+ if octet != 'M':
+ return None, None
+ octet = chr(codec.read_uint8())
+ if octet != '2':
+ return None, None
+ opcode = chr(codec.read_uint8())
+ seq = codec.read_uint32()
+ return opcode, seq
+ except:
+ return None, None
+
+ def _message (self, body, routing_key="broker", ttl=None):
+ dp = self.amqpSession.delivery_properties()
+ dp.routing_key = routing_key
+ if ttl:
+ dp.ttl = ttl
+ mp = self.amqpSession.message_properties()
+ mp.content_type = "x-application/qmf"
+ mp.user_id = self.authUser
+ mp.reply_to = self.amqpSession.reply_to("amq.direct", self.replyName)
+ return Message(dp, mp, body)
+
+ def _send(self, msg, dest="qpid.management"):
+ self.amqpSession.message_transfer(destination=dest, message=msg)
+
+ def _shutdown(self):
+ if self.thread:
+ self.thread.stop()
+ self.thread.join()
+ if self.connected:
+ self.amqpSession.incoming("rdest").stop()
+ if self.session.console != None:
+ self.amqpSession.incoming("tdest").stop()
+ self.amqpSession.close()
+ self.conn.close()
+ self.connected = False
+
+ def _waitForStable(self):
+ try:
+ self.cv.acquire()
+ if not self.connected:
+ return
+ if self.reqsOutstanding == 0:
+ return
+ self.syncInFlight = True
+ starttime = time()
+ while self.reqsOutstanding != 0:
+ self.cv.wait(self.SYNC_TIME)
+ if time() - starttime > self.SYNC_TIME:
+ raise RuntimeError("Timed out waiting for broker to synchronize")
+ finally:
+ self.cv.release()
+
+ def _incOutstanding(self):
+ try:
+ self.cv.acquire()
+ self.reqsOutstanding += 1
+ finally:
+ self.cv.release()
+
+ def _decOutstanding(self):
+ try:
+ self.cv.acquire()
+ self.reqsOutstanding -= 1
+ if self.reqsOutstanding == 0 and not self.topicBound:
+ self.topicBound = True
+ for key in self.session.bindingKeyList:
+ self.amqpSession.exchange_bind(exchange="qpid.management",
+ queue=self.topicName, binding_key=key)
+ if self.reqsOutstanding == 0 and self.syncInFlight:
+ self.syncInFlight = False
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def _replyCb(self, msg):
+ codec = Codec(msg.body)
+ while True:
+ opcode, seq = self._checkHeader(codec)
+ if opcode == None: return
+ if opcode == 'b': self.session._handleBrokerResp (self, codec, seq)
+ elif opcode == 'p': self.session._handlePackageInd (self, codec, seq)
+ elif opcode == 'z': self.session._handleCommandComplete (self, codec, seq)
+ elif opcode == 'q': self.session._handleClassInd (self, codec, seq)
+ elif opcode == 'm': self.session._handleMethodResp (self, codec, seq)
+ elif opcode == 'h': self.session._handleHeartbeatInd (self, codec, seq, msg)
+ elif opcode == 'e': self.session._handleEventInd (self, codec, seq)
+ elif opcode == 's': self.session._handleSchemaResp (self, codec, seq)
+ elif opcode == 'c': self.session._handleContentInd (self, codec, seq, prop=True)
+ elif opcode == 'i': self.session._handleContentInd (self, codec, seq, stat=True)
+ elif opcode == 'g': self.session._handleContentInd (self, codec, seq, prop=True, stat=True)
+
+ def _exceptionCb(self, data):
+ self.connected = False
+ self.error = data
+ try:
+ self.cv.acquire()
+ if self.syncInFlight:
+ self.cv.notify()
+ finally:
+ self.cv.release()
+ self.session._handleError(self.error)
+ self.session._handleBrokerDisconnect(self)
+ if self.thread:
+ self.thread.disconnected()
+
+class Agent:
+ """ """
+ def __init__(self, broker, agentBank, label):
+ self.broker = broker
+ self.brokerBank = broker.getBrokerBank()
+ self.agentBank = agentBank
+ self.label = label
+
+ def __repr__(self):
+ return "Agent at bank %d.%d (%s)" % (self.brokerBank, self.agentBank, self.label)
+
+ def getBroker(self):
+ return self.broker
+
+ def getBrokerBank(self):
+ return self.brokerBank
+
+ def getAgentBank(self):
+ return self.agentBank
+
+class Event:
+ """ """
+ def __init__(self, session, broker, codec):
+ self.session = session
+ self.broker = broker
+ self.classKey = ClassKey(codec)
+ self.timestamp = codec.read_int64()
+ self.severity = codec.read_uint8()
+ self.schema = None
+ pname = self.classKey.getPackageName()
+ pkey = self.classKey.getPackageKey()
+ if pname in session.packages:
+ if pkey in session.packages[pname]:
+ self.schema = session.packages[pname][pkey]
+ self.arguments = {}
+ for arg in self.schema.arguments:
+ self.arguments[arg.name] = session._decodeValue(codec, arg.type, broker)
+
+ def __repr__(self):
+ if self.schema == None:
+ return "<uninterpretable>"
+ out = strftime("%c", gmtime(self.timestamp / 1000000000))
+ out += " " + self._sevName() + " " + self.classKey.getPackageName() + ":" + self.classKey.getClassName()
+ out += " broker=" + self.broker.getUrl()
+ for arg in self.schema.arguments:
+ disp = self.session._displayValue(self.arguments[arg.name], arg.type).encode("utf8")
+ if " " in disp:
+ disp = "\"" + disp + "\""
+ out += " " + arg.name + "=" + disp
+ return out
+
+ def _sevName(self):
+ if self.severity == 0 : return "EMER "
+ if self.severity == 1 : return "ALERT"
+ if self.severity == 2 : return "CRIT "
+ if self.severity == 3 : return "ERROR"
+ if self.severity == 4 : return "WARN "
+ if self.severity == 5 : return "NOTIC"
+ if self.severity == 6 : return "INFO "
+ if self.severity == 7 : return "DEBUG"
+ return "INV-%d" % self.severity
+
+ def getClassKey(self):
+ return self.classKey
+
+ def getArguments(self):
+ return self.arguments
+
+ def getTimestamp(self):
+ return self.timestamp
+
+ def getName(self):
+ return self.name
+
+ def getSchema(self):
+ return self.schema
+
+class SequenceManager:
+ """ Manage sequence numbers for asynchronous method calls """
+ def __init__(self):
+ self.lock = Lock()
+ self.sequence = 0
+ self.pending = {}
+
+ def _reserve(self, data):
+ """ Reserve a unique sequence number """
+ try:
+ self.lock.acquire()
+ result = self.sequence
+ self.sequence = self.sequence + 1
+ self.pending[result] = data
+ finally:
+ self.lock.release()
+ return result
+
+ def _release(self, seq):
+ """ Release a reserved sequence number """
+ data = None
+ try:
+ self.lock.acquire()
+ if seq in self.pending:
+ data = self.pending[seq]
+ del self.pending[seq]
+ finally:
+ self.lock.release()
+ return data
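+
+# Illustrative usage (the context value is made up): callers stash a context
+# object under a fresh sequence number and recover it when the matching reply
+# arrives:
+#   mgr = SequenceManager()
+#   seq = mgr._reserve("my-context")   # unique integer, starting at 0
+#   mgr._release(seq)                  # -> "my-context"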
+
+
+class DebugConsole(Console):
+ """ """
+ def brokerConnected(self, broker):
+ print "brokerConnected:", broker
+
+ def brokerDisconnected(self, broker):
+ print "brokerDisconnected:", broker
+
+ def newPackage(self, name):
+ print "newPackage:", name
+
+ def newClass(self, kind, classKey):
+ print "newClass:", kind, classKey
+
+ def newAgent(self, agent):
+ print "newAgent:", agent
+
+ def delAgent(self, agent):
+ print "delAgent:", agent
+
+ def objectProps(self, broker, record):
+ print "objectProps:", record
+
+ def objectStats(self, broker, record):
+ print "objectStats:", record
+
+ def event(self, broker, event):
+ print "event:", event
+
+ def heartbeat(self, agent, timestamp):
+ print "heartbeat:", agent
+
+ def brokerInfo(self, broker):
+ print "brokerInfo:", broker
+
diff --git a/python/qpid-python-test b/python/qpid-python-test
new file mode 100755
index 0000000000..b569020368
--- /dev/null
+++ b/python/qpid-python-test
@@ -0,0 +1,575 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# TODO: summarize, test harness preconditions (e.g. broker is alive)
+
+import logging, optparse, os, struct, sys, traceback, types
+from fnmatch import fnmatchcase as match
+from getopt import GetoptError
+from logging import getLogger, StreamHandler, Formatter, Filter, \
+ WARN, DEBUG, ERROR
+from qpid.harness import Skipped
+from qpid.util import URL
+
+levels = {
+ "DEBUG": DEBUG,
+ "WARN": WARN,
+ "ERROR": ERROR
+ }
+
+sorted_levels = [(v, k) for k, v in levels.items()]
+sorted_levels.sort()
+sorted_levels = [v for k, v in sorted_levels]
+
+parser = optparse.OptionParser(usage="usage: %prog [options] PATTERN ...",
+ description="Run tests matching the specified PATTERNs.")
+parser.add_option("-l", "--list", action="store_true", default=False,
+ help="list tests instead of executing them")
+parser.add_option("-b", "--broker", default="localhost",
+ help="run tests against BROKER (default %default)")
+parser.add_option("-f", "--log-file", metavar="FILE", help="log output to FILE")
+parser.add_option("-v", "--log-level", metavar="LEVEL", default="WARN",
+ help="only display log messages of LEVEL or higher severity: "
+ "%s (default %%default)" % ", ".join(sorted_levels))
+parser.add_option("-c", "--log-category", metavar="CATEGORY", action="append",
+ dest="log_categories", default=[],
+ help="log only categories matching CATEGORY pattern")
+parser.add_option("-m", "--module", action="append", default=[],
+ dest="modules", help="add module to test search path")
+parser.add_option("-i", "--ignore", action="append", default=[],
+ help="ignore tests matching IGNORE pattern")
+parser.add_option("-I", "--ignore-file", metavar="IFILE", action="append",
+ default=[],
+ help="ignore tests matching patterns in IFILE")
+parser.add_option("-H", "--halt-on-error", action="store_true", default=False,
+ dest="hoe", help="halt if an error is encountered")
+parser.add_option("-D", "--define", metavar="DEFINE", dest="defines",
+ action="append", default=[], help="define test parameters")
+
+class Config:
+
+ def __init__(self):
+ self.broker = URL("localhost")
+ self.defines = {}
+ self.log_file = None
+ self.log_level = WARN
+ self.log_categories = []
+
+opts, args = parser.parse_args()
+
+includes = []
+excludes = ["*__*__"]
+config = Config()
+list_only = opts.list
+config.broker = URL(opts.broker)
+for d in opts.defines:
+ try:
+ idx = d.index("=")
+ name = d[:idx]
+ value = d[idx+1:]
+ config.defines[name] = value
+ except ValueError:
+ config.defines[d] = None
+config.log_file = opts.log_file
+config.log_level = levels[opts.log_level.upper()]
+config.log_categories = opts.log_categories
+excludes.extend([v.strip() for v in opts.ignore])
+for v in opts.ignore_file:
+ f = open(v)
+ for line in f:
+ line = line.strip()
+ if line.startswith("#"):
+ continue
+ excludes.append(line)
+ f.close()
+
+for a in args:
+ includes.append(a.strip())
+
+if not includes:
+ if opts.modules:
+ includes.append("*")
+ else:
+ includes.extend(["qpid.tests.*", "tests.*", "tests_0-10.*"])
+
+def is_ignored(path):
+ for p in excludes:
+ if match(path, p):
+ return True
+ return False
+
+def is_included(path):
+ if is_ignored(path):
+ return False
+ for p in includes:
+ if match(path, p):
+ return True
+ return False
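+
+# Illustrative examples (test names are made up), using the default include
+# and exclude patterns above:
+#   is_included("qpid.tests.codec.CodecTest.test_str8")    # True, matches "qpid.tests.*"
+#   is_included("qpid.tests.codec.CodecTest.__init__")     # False, excluded by "*__*__"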
+
+def is_smart():
+ return sys.stdout.isatty() and os.environ.get("TERM", "dumb") != "dumb"
+
+try:
+ import fcntl, termios
+
+ def width():
+ if is_smart():
+ s = struct.pack("HHHH", 0, 0, 0, 0)
+ fd_stdout = sys.stdout.fileno()
+ x = fcntl.ioctl(fd_stdout, termios.TIOCGWINSZ, s)
+ rows, cols, xpx, ypx = struct.unpack("HHHH", x)
+ return cols
+ else:
+ try:
+ return int(os.environ.get("COLUMNS", "80"))
+ except ValueError:
+ return 80
+
+ WIDTH = width()
+
+ def resize(sig, frm):
+ global WIDTH
+ WIDTH = width()
+
+ import signal
+ signal.signal(signal.SIGWINCH, resize)
+
+except ImportError:
+ WIDTH = 80
+
+def vt100_attrs(*attrs):
+ return "\x1B[%sm" % ";".join(map(str, attrs))
+
+vt100_reset = vt100_attrs(0)
+
+KEYWORDS = {"pass": (32,),
+ "skip": (33,),
+ "fail": (31,),
+ "start": (34,),
+ "total": (34,),
+ "ignored": (33,),
+ "selected": (34,)}
+
+COLORIZE = is_smart()
+
+def colorize_word(word, text=None):
+ if text is None:
+ text = word
+ return colorize(text, *KEYWORDS.get(word, ()))
+
+def colorize(text, *attrs):
+ if attrs and COLORIZE:
+ return "%s%s%s" % (vt100_attrs(*attrs), text, vt100_reset)
+ else:
+ return text
+
+def indent(text):
+ lines = text.split("\n")
+ return " %s" % "\n ".join(lines)
+
+class Interceptor:
+
+ def __init__(self):
+ self.newline = False
+ self.indent = False
+ self.passthrough = True
+ self.dirty = False
+ self.last = None
+
+ def begin(self):
+ self.newline = True
+ self.indent = True
+ self.passthrough = False
+ self.dirty = False
+ self.last = None
+
+ def reset(self):
+ self.newline = False
+ self.indent = False
+ self.passthrough = True
+
+class StreamWrapper:
+
+ def __init__(self, interceptor, stream, prefix=" "):
+ self.interceptor = interceptor
+ self.stream = stream
+ self.prefix = prefix
+
+ def fileno(self):
+ return self.stream.fileno()
+
+ def isatty(self):
+ return self.stream.isatty()
+
+ def write(self, s):
+ if self.interceptor.passthrough:
+ self.stream.write(s)
+ return
+
+ if s:
+ self.interceptor.dirty = True
+
+ if self.interceptor.newline:
+ self.interceptor.newline = False
+ self.stream.write(" %s\n" % colorize_word("start"))
+ self.interceptor.indent = True
+ if self.interceptor.indent:
+ self.stream.write(self.prefix)
+ if s.endswith("\n"):
+ s = s.replace("\n", "\n%s" % self.prefix)[:-2]
+ self.interceptor.indent = True
+ else:
+ s = s.replace("\n", "\n%s" % self.prefix)
+ self.interceptor.indent = False
+ self.stream.write(s)
+
+ if s:
+ self.interceptor.last = s[-1]
+
+ def flush(self):
+ self.stream.flush()
+
+interceptor = Interceptor()
+
+out_wrp = StreamWrapper(interceptor, sys.stdout)
+err_wrp = StreamWrapper(interceptor, sys.stderr)
+
+out = sys.stdout
+err = sys.stderr
+sys.stdout = out_wrp
+sys.stderr = err_wrp
+
+class PatternFilter(Filter):
+
+ def __init__(self, *patterns):
+ Filter.__init__(self, patterns)
+ self.patterns = patterns
+
+ def filter(self, record):
+ if not self.patterns:
+ return True
+ for p in self.patterns:
+ if match(record.name, p):
+ return True
+ return False
+
+root = getLogger()
+handler = StreamHandler(sys.stdout)
+filter = PatternFilter(*config.log_categories)
+handler.addFilter(filter)
+handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
+root.addHandler(handler)
+root.setLevel(WARN)
+
+log = getLogger("qpid.test")
+
+PASS = "pass"
+SKIP = "skip"
+FAIL = "fail"
+
+class Runner:
+
+ def __init__(self):
+ self.exceptions = []
+ self.skip = False
+
+ def passed(self):
+ return not self.exceptions
+
+ def skipped(self):
+ return self.skip
+
+ def failed(self):
+ return self.exceptions and not self.skip
+
+ def halt(self):
+ return self.exceptions or self.skip
+
+ def run(self, name, phase):
+ try:
+ phase()
+ except KeyboardInterrupt:
+ raise
+ except:
+ exi = sys.exc_info()
+ if issubclass(exi[0], Skipped):
+ self.skip = True
+ self.exceptions.append((name, exi))
+
+ def status(self):
+ if self.passed():
+ return PASS
+ elif self.skipped():
+ return SKIP
+ elif self.failed():
+ return FAIL
+ else:
+ return None
+
+ def print_exceptions(self):
+ for name, info in self.exceptions:
+ if issubclass(info[0], Skipped):
+ print indent("".join(traceback.format_exception_only(*info[:2]))).rstrip()
+ else:
+ print "Error during %s:" % name
+ print indent("".join(traceback.format_exception(*info))).rstrip()
+
+ST_WIDTH = 8
+
+def run_test(name, test, config):
+ patterns = filter.patterns
+ level = root.level
+ filter.patterns = config.log_categories
+ root.setLevel(config.log_level)
+
+ parts = name.split(".")
+ line = None
+ output = ""
+ for part in parts:
+ if line:
+ if len(line) + len(part) >= (WIDTH - ST_WIDTH - 1):
+ output += "%s. \\\n" % line
+ line = " %s" % part
+ else:
+ line = "%s.%s" % (line, part)
+ else:
+ line = part
+
+ if line:
+ output += "%s %s" % (line, (((WIDTH - ST_WIDTH) - len(line))*"."))
+ sys.stdout.write(output)
+ sys.stdout.flush()
+ interceptor.begin()
+ try:
+ runner = test()
+ finally:
+ interceptor.reset()
+ if interceptor.dirty:
+ if interceptor.last != "\n":
+ sys.stdout.write("\n")
+ sys.stdout.write(output)
+ print " %s" % colorize_word(runner.status())
+ if runner.failed() or runner.skipped():
+ runner.print_exceptions()
+ root.setLevel(level)
+ filter.patterns = patterns
+ return runner.status()
+
+class FunctionTest:
+
+ def __init__(self, test):
+ self.test = test
+
+ def name(self):
+ return "%s.%s" % (self.test.__module__, self.test.__name__)
+
+ def run(self):
+ return run_test(self.name(), self._run, config)
+
+ def _run(self):
+ runner = Runner()
+ runner.run("test", lambda: self.test(config))
+ return runner
+
+ def __repr__(self):
+ return "FunctionTest(%r)" % self.test
+
+class MethodTest:
+
+ def __init__(self, cls, method):
+ self.cls = cls
+ self.method = method
+
+ def name(self):
+ return "%s.%s.%s" % (self.cls.__module__, self.cls.__name__, self.method)
+
+ def run(self):
+ return run_test(self.name(), self._run, config)
+
+ def _run(self):
+ runner = Runner()
+ inst = self.cls(self.method)
+ test = getattr(inst, self.method)
+
+ if hasattr(inst, "configure"):
+ runner.run("configure", lambda: inst.configure(config))
+ if runner.halt(): return runner
+ if hasattr(inst, "setUp"):
+ runner.run("setup", inst.setUp)
+ if runner.halt(): return runner
+ elif hasattr(inst, "setup"):
+ runner.run("setup", inst.setup)
+ if runner.halt(): return runner
+
+ runner.run("test", test)
+
+ if hasattr(inst, "tearDown"):
+ runner.run("teardown", inst.tearDown)
+ elif hasattr(inst, "teardown"):
+ runner.run("teardown", inst.teardown)
+
+ return runner
+
+ def __repr__(self):
+ return "MethodTest(%r, %r)" % (self.cls, self.method)
+
+class PatternMatcher:
+
+ def __init__(self, *patterns):
+ self.patterns = patterns
+
+ def matches(self, name):
+ for p in self.patterns:
+ if match(name, p):
+ return True
+ return False
+
+class FunctionScanner(PatternMatcher):
+
+ def inspect(self, obj):
+    return type(obj) == types.FunctionType and self.matches(obj.__name__)
+
+ def descend(self, func):
+ # the None is required for older versions of python
+ return; yield None
+
+ def extract(self, func):
+ yield FunctionTest(func)
+
+class ClassScanner(PatternMatcher):
+
+ def inspect(self, obj):
+ return type(obj) in (types.ClassType, types.TypeType) and self.matches(obj.__name__)
+
+ def descend(self, cls):
+ # the None is required for older versions of python
+ return; yield None
+
+ def extract(self, cls):
+ names = dir(cls)
+ names.sort()
+ for name in names:
+ obj = getattr(cls, name)
+ t = type(obj)
+ if t == types.MethodType and name.startswith("test"):
+ yield MethodTest(cls, name)
+
+class ModuleScanner:
+
+ def inspect(self, obj):
+ return type(obj) == types.ModuleType
+
+ def descend(self, obj):
+ names = dir(obj)
+ names.sort()
+ for name in names:
+ yield getattr(obj, name)
+
+ def extract(self, obj):
+ # the None is required for older versions of python
+ return; yield None
+
+class Harness:
+
+ def __init__(self):
+ self.scanners = [
+ ModuleScanner(),
+ ClassScanner("*Test", "*Tests", "*TestCase"),
+ FunctionScanner("test_*")
+ ]
+ self.tests = []
+ self.scanned = []
+
+ def scan(self, *roots):
+ objects = list(roots)
+
+ while objects:
+ obj = objects.pop(0)
+ for s in self.scanners:
+ if s.inspect(obj):
+ self.tests.extend(s.extract(obj))
+ for child in s.descend(obj):
+ if not (child in self.scanned or child in objects):
+ objects.append(child)
+ self.scanned.append(obj)
+
+modules = opts.modules
+if not modules:
+ modules.extend(["qpid.tests", "tests", "tests_0-8", "tests_0-9", "tests_0-10"])
+h = Harness()
+for name in modules:
+ m = __import__(name, None, None, ["dummy"])
+ h.scan(m)
+
+filtered = [t for t in h.tests if is_included(t.name())]
+ignored = [t for t in h.tests if is_ignored(t.name())]
+total = len(filtered) + len(ignored)
+
+passed = 0
+failed = 0
+skipped = 0
+for t in filtered:
+ if list_only:
+ print t.name()
+ else:
+ st = t.run()
+ if st == PASS:
+ passed += 1
+ elif st == SKIP:
+ skipped += 1
+ elif st == FAIL:
+ failed += 1
+ if opts.hoe:
+ break
+
+run = passed + failed
+
+if not list_only:
+ if passed:
+ _pass = "pass"
+ else:
+ _pass = "fail"
+ if failed:
+ outcome = "fail"
+ else:
+ outcome = "pass"
+ if ignored:
+ ign = "ignored"
+ else:
+ ign = "pass"
+ if skipped:
+ skip = "skip"
+ else:
+ skip = "pass"
+ print colorize("Totals:", 1), \
+ colorize_word("total", "%s tests" % total) + ",", \
+ colorize_word(_pass, "%s passed" % passed) + ",", \
+ colorize_word(skip, "%s skipped" % skipped) + ",", \
+ colorize_word(ign, "%s ignored" % len(ignored)) + ",", \
+ colorize_word(outcome, "%s failed" % failed),
+ if opts.hoe and failed > 0:
+ print " -- (halted after %s)" % run
+ else:
+ print
+
+if failed or skipped:
+ sys.exit(1)
+else:
+ sys.exit(0)
diff --git a/python/qpid/address.py b/python/qpid/address.py
new file mode 100644
index 0000000000..6228ac757b
--- /dev/null
+++ b/python/qpid/address.py
@@ -0,0 +1,161 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import re
+from lexer import Lexicon, LexError
+from parser import Parser, ParseError
+
+l = Lexicon()
+
+LBRACE = l.define("LBRACE", r"\{")
+RBRACE = l.define("RBRACE", r"\}")
+LBRACK = l.define("LBRACK", r"\[")
+RBRACK = l.define("RBRACK", r"\]")
+COLON = l.define("COLON", r":")
+SEMI = l.define("SEMI", r";")
+SLASH = l.define("SLASH", r"/")
+COMMA = l.define("COMMA", r",")
+NUMBER = l.define("NUMBER", r'[+-]?[0-9]*\.?[0-9]+')
+ID = l.define("ID", r'[a-zA-Z_](?:[a-zA-Z0-9_-]*[a-zA-Z0-9_])?')
+STRING = l.define("STRING", r""""(?:[^\\"]|\\.)*"|'(?:[^\\']|\\.)*'""")
+ESC = l.define("ESC", r"\\[^ux]|\\x[0-9a-fA-F][0-9a-fA-F]|\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]")
+SYM = l.define("SYM", r"[.#*%@$^!+-]")
+WSPACE = l.define("WSPACE", r"[ \n\r\t]+")
+EOF = l.eof("EOF")
+
+LEXER = l.compile()
+
+def lex(st):
+ return LEXER.lex(st)
+
+def tok2str(tok):
+ if tok.type is STRING:
+ return eval(tok.value)
+ elif tok.type is ESC:
+ if tok.value[1] == "x":
+ return eval('"%s"' % tok.value)
+ elif tok.value[1] == "u":
+ return eval('u"%s"' % tok.value)
+ else:
+ return tok.value[1]
+ else:
+ return tok.value
+
+def tok2obj(tok):
+ if tok.type in (STRING, NUMBER):
+ return eval(tok.value)
+ else:
+ return tok.value
+
+def toks2str(toks):
+ if toks:
+ return "".join(map(tok2str, toks))
+ else:
+ return None
+
+class AddressParser(Parser):
+
+ def __init__(self, tokens):
+ Parser.__init__(self, [t for t in tokens if t.type is not WSPACE])
+
+ def parse(self):
+ result = self.address()
+ self.eat(EOF)
+ return result
+
+ def address(self):
+ name = toks2str(self.eat_until(SLASH, SEMI, EOF))
+
+ if name is None:
+ raise ParseError(self.next())
+
+ if self.matches(SLASH):
+ self.eat(SLASH)
+ subject = toks2str(self.eat_until(SEMI, EOF))
+ else:
+ subject = None
+
+ if self.matches(SEMI):
+ self.eat(SEMI)
+ options = self.map()
+ else:
+ options = None
+ return name, subject, options
+
+ def map(self):
+ self.eat(LBRACE)
+
+ result = {}
+ while True:
+ if self.matches(ID):
+ n, v = self.nameval()
+ result[n] = v
+ if self.matches(COMMA):
+ self.eat(COMMA)
+ elif self.matches(RBRACE):
+ break
+ else:
+ raise ParseError(self.next(), COMMA, RBRACE)
+ elif self.matches(RBRACE):
+ break
+ else:
+ raise ParseError(self.next(), ID, RBRACE)
+
+ self.eat(RBRACE)
+ return result
+
+ def nameval(self):
+ name = self.eat(ID).value
+ self.eat(COLON)
+ val = self.value()
+ return (name, val)
+
+ def value(self):
+ if self.matches(NUMBER, STRING, ID):
+ return tok2obj(self.eat())
+ elif self.matches(LBRACE):
+ return self.map()
+ elif self.matches(LBRACK):
+ return self.list()
+ else:
+ raise ParseError(self.next(), NUMBER, STRING, ID, LBRACE, LBRACK)
+
+ def list(self):
+ self.eat(LBRACK)
+
+ result = []
+
+ while True:
+ if self.matches(RBRACK):
+ break
+ else:
+ result.append(self.value())
+ if self.matches(COMMA):
+ self.eat(COMMA)
+ elif self.matches(RBRACK):
+ break
+ else:
+ raise ParseError(self.next(), COMMA, RBRACK)
+
+ self.eat(RBRACK)
+ return result
+
+def parse(addr):
+ return AddressParser(lex(addr)).parse()
+
+__all__ = ["parse", "ParseError"]
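+
+# Illustrative usage (the address string is an example, not from the tests):
+#   parse("my-queue/my-subject; {mode: browse, limit: 10}")
+#   # -> ("my-queue", "my-subject", {"mode": "browse", "limit": 10})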
diff --git a/python/qpid/assembler.py b/python/qpid/assembler.py
deleted file mode 100644
index 92bb0aa0f8..0000000000
--- a/python/qpid/assembler.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from codec010 import StringCodec
-from framer import *
-from logging import getLogger
-
-log = getLogger("qpid.io.seg")
-
-class Segment:
-
- def __init__(self, first, last, type, track, channel, payload):
- self.id = None
- self.offset = None
- self.first = first
- self.last = last
- self.type = type
- self.track = track
- self.channel = channel
- self.payload = payload
-
- def decode(self, spec):
- segs = spec["segment_type"]
- choice = segs.choices[self.type]
- return getattr(self, "decode_%s" % choice.name)(spec)
-
- def decode_control(self, spec):
- sc = StringCodec(spec, self.payload)
- return sc.read_control()
-
- def decode_command(self, spec):
- sc = StringCodec(spec, self.payload)
- hdr, cmd = sc.read_command()
- cmd.id = self.id
- return hdr, cmd
-
- def decode_header(self, spec):
- sc = StringCodec(spec, self.payload)
- values = []
- while len(sc.encoded) > 0:
- values.append(sc.read_struct32())
- return values
-
- def decode_body(self, spec):
- return self.payload
-
- def __str__(self):
- return "%s%s %s %s %s %r" % (int(self.first), int(self.last), self.type,
- self.track, self.channel, self.payload)
-
- def __repr__(self):
- return str(self)
-
-class Assembler(Framer):
-
- def __init__(self, sock, max_payload = Frame.MAX_PAYLOAD):
- Framer.__init__(self, sock)
- self.max_payload = max_payload
- self.fragments = {}
-
- def read_segment(self):
- while True:
- frame = self.read_frame()
-
- key = (frame.channel, frame.track)
- seg = self.fragments.get(key)
- if seg == None:
- seg = Segment(frame.isFirstSegment(), frame.isLastSegment(),
- frame.type, frame.track, frame.channel, "")
- self.fragments[key] = seg
-
- seg.payload += frame.payload
-
- if frame.isLastFrame():
- self.fragments.pop(key)
- log.debug("RECV %s", seg)
- return seg
-
- def write_segment(self, segment):
- remaining = segment.payload
-
- first = True
- while first or remaining:
- payload = remaining[:self.max_payload]
- remaining = remaining[self.max_payload:]
-
- flags = 0
- if first:
- flags |= FIRST_FRM
- first = False
- if not remaining:
- flags |= LAST_FRM
- if segment.first:
- flags |= FIRST_SEG
- if segment.last:
- flags |= LAST_SEG
-
- frame = Frame(flags, segment.type, segment.track, segment.channel,
- payload)
- self.write_frame(frame)
-
- log.debug("SENT %s", segment)
diff --git a/python/qpid/brokertest.py b/python/qpid/brokertest.py
new file mode 100644
index 0000000000..83d6c44d84
--- /dev/null
+++ b/python/qpid/brokertest.py
@@ -0,0 +1,480 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Support library for tests that start multiple brokers, e.g. cluster
+# or federation
+
+import os, signal, string, tempfile, popen2, socket, threading, time, imp
+import qpid, traceback
+from qpid import connection, messaging, util
+from qpid.compat import format_exc
+from qpid.harness import Skipped
+from unittest import TestCase
+from copy import copy
+from threading import Thread, Lock, Condition
+from logging import getLogger
+
+log = getLogger("qpid.brokertest")
+
+# Values for expected outcome of process at end of test
+EXPECT_EXIT_OK=1 # Expect to exit with 0 status before end of test.
+EXPECT_EXIT_FAIL=2 # Expect to exit with non-0 status before end of test.
+EXPECT_RUNNING=3 # Expect to still be running at end of test
+
+def is_running(pid):
+ try:
+ os.kill(pid, 0)
+ return True
+ except:
+ return False
+
+class BadProcessStatus(Exception):
+ pass
+
+class ExceptionWrapper:
+ """Proxy object that adds a message to exceptions raised"""
+ def __init__(self, obj, msg):
+ self.obj = obj
+ self.msg = msg
+
+ def __getattr__(self, name):
+ func = getattr(self.obj, name)
+ return lambda *args, **kwargs: self._wrap(func, args, kwargs)
+
+ def _wrap(self, func, args, kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception, e:
+ raise Exception("%s: %s" %(self.msg, str(e)))
+
+def error_line(f):
+ try:
+ lines = file(f).readlines()
+ if len(lines) > 0: return ": %s" % (lines[-1])
+ except: pass
+ return ""
+
+
+class Popen(popen2.Popen3):
+ """
+ Similar to subprocess.Popen but using popen2 classes for portability.
+ Can set and verify expectation of process status at end of test.
+ Dumps command line, stdout, stderr to data dir for debugging.
+ """
+
+ def __init__(self, cmd, expect=EXPECT_EXIT_OK):
+ if type(cmd) is type(""): cmd = [cmd] # Make it a list.
+ self.cmd = [ str(x) for x in cmd ]
+ popen2.Popen3.__init__(self, self.cmd, True)
+ self.expect = expect
+ self.pname = "%s-%d" % (os.path.split(self.cmd[0])[-1], self.pid)
+ msg = "Process %s" % self.pname
+ self.stdin = ExceptionWrapper(self.tochild, msg)
+ self.stdout = ExceptionWrapper(self.fromchild, msg)
+ self.stderr = ExceptionWrapper(self.childerr, msg)
+ self.dump(self.cmd_str(), "cmd")
+ log.debug("Started process %s" % self.pname)
+
+ def dump(self, str, ext):
+ name = "%s.%s" % (self.pname, ext)
+ f = file(name, "w")
+ f.write(str)
+ f.close()
+ return name
+
+ def unexpected(self,msg):
+ self.dump(self.stdout.read(), "out")
+ err = self.dump(self.stderr.read(), "err")
+ raise BadProcessStatus("%s %s%s" % (self.pname, msg, error_line(err)))
+
+ def stop(self): # Clean up at end of test.
+ if self.expect == EXPECT_RUNNING:
+ try:
+ self.kill()
+ except:
+ self.unexpected("expected running, exit code %d" % self.wait())
+ else:
+ # Give the process some time to exit.
+ delay = 0.1
+ while (self.poll() is None and delay < 1):
+ time.sleep(delay)
+ delay *= 2
+ if self.returncode is None: # Still haven't stopped
+ self.kill()
+ self.unexpected("still running")
+ elif self.expect == EXPECT_EXIT_OK and self.returncode != 0:
+ self.unexpected("exit code %d" % self.returncode)
+ elif self.expect == EXPECT_EXIT_FAIL and self.returncode == 0:
+ self.unexpected("expected error")
+
+ def communicate(self, input=None):
+ if input:
+ self.stdin.write(input)
+ self.stdin.close()
+ outerr = (self.stdout.read(), self.stderr.read())
+ self.wait()
+ return outerr
+
+ def is_running(self): return self.poll() is None
+
+ def assert_running(self):
+    if not self.is_running(): self.unexpected("Exit code %d" % self.returncode)
+
+ def poll(self):
+ self.returncode = popen2.Popen3.poll(self)
+ if (self.returncode == -1): self.returncode = None
+ return self.returncode
+
+ def wait(self):
+ self.returncode = popen2.Popen3.wait(self)
+ return self.returncode
+
+ def send_signal(self, sig):
+ os.kill(self.pid,sig)
+ self.wait()
+
+ def terminate(self): self.send_signal(signal.SIGTERM)
+ def kill(self): self.send_signal(signal.SIGKILL)
+
+ def cmd_str(self): return " ".join([str(s) for s in self.cmd])
+
+def checkenv(name):
+ value = os.getenv(name)
+ if not value: raise Exception("Environment variable %s is not set" % name)
+ return value
+
+class Broker(Popen):
+ "A broker process. Takes care of start, stop and logging."
+ _broker_count = 0
+
+ def __init__(self, test, args=[], name=None, expect=EXPECT_RUNNING):
+ """Start a broker daemon. name determines the data-dir and log
+ file names."""
+
+ self.test = test
+ self._port = None
+ cmd = [BrokerTest.qpidd_exec, "--port=0", "--no-module-dir", "--auth=no"] + args
+ if name: self.name = name
+ else:
+ self.name = "broker%d" % Broker._broker_count
+ Broker._broker_count += 1
+ self.log = self.name+".log"
+ cmd += ["--log-to-file", self.log, "--log-prefix", self.name]
+ cmd += ["--log-to-stderr=no"]
+ self.datadir = self.name
+ cmd += ["--data-dir", self.datadir]
+ Popen.__init__(self, cmd, expect)
+ test.cleanup_stop(self)
+ self.host = "localhost"
+ log.debug("Started broker %s (%s)" % (self.name, self.pname))
+
+ def port(self):
+ # Read port from broker process stdout if not already read.
+ if (self._port is None):
+ try: self._port = int(self.stdout.readline())
+ except ValueError, e:
+ raise Exception("Can't get port for broker %s (%s)%s" %
+ (self.name, self.pname, error_line(self.log)))
+ return self._port
+
+ def unexpected(self,msg):
+ raise BadProcessStatus("%s: %s (%s)" % (msg, self.name, self.pname))
+
+ def connect(self):
+ """New API connection to the broker."""
+ return messaging.Connection.open(self.host, self.port())
+
+ def connect_old(self):
+ """Old API connection to the broker."""
+ socket = qpid.util.connect(self.host,self.port())
+ connection = qpid.connection.Connection (sock=socket)
+ connection.start()
+    return connection
+
+ def declare_queue(self, queue):
+ c = self.connect_old()
+ s = c.session(str(qpid.datatypes.uuid4()))
+ s.queue_declare(queue=queue)
+ c.close()
+
+ def send_message(self, queue, message):
+ s = self.connect().session()
+ s.sender(queue+"; {create:always}").send(message)
+ s.connection.close()
+
+ def send_messages(self, queue, messages):
+ s = self.connect().session()
+ sender = s.sender(queue+"; {create:always}")
+ for m in messages: sender.send(m)
+ s.connection.close()
+
+ def get_message(self, queue):
+ s = self.connect().session()
+ m = s.receiver(queue+"; {create:always}", capacity=1).fetch(timeout=1)
+ s.acknowledge()
+ s.connection.close()
+ return m
+
+ def get_messages(self, queue, n):
+ s = self.connect().session()
+ receiver = s.receiver(queue+"; {create:always}", capacity=n)
+ m = [receiver.fetch(timeout=1) for i in range(n)]
+ s.acknowledge()
+ s.connection.close()
+ return m
+
+ def host_port(self): return "%s:%s" % (self.host, self.port())
+
+
+class Cluster:
+ """A cluster of brokers in a test."""
+
+ _cluster_count = 0
+
+ def __init__(self, test, count=0, args=[], expect=EXPECT_RUNNING, wait=True):
+ self.test = test
+ self._brokers=[]
+ self.name = "cluster%d" % Cluster._cluster_count
+ Cluster._cluster_count += 1
+ # Use unique cluster name
+ self.args = copy(args)
+ self.args += [ "--cluster-name", "%s-%s:%d" % (self.name, socket.gethostname(), os.getpid()) ]
+ assert BrokerTest.cluster_lib
+ self.args += [ "--load-module", BrokerTest.cluster_lib ]
+ self.start_n(count, expect=expect, wait=wait)
+
+ def start(self, name=None, expect=EXPECT_RUNNING, wait=True, args=[]):
+ """Add a broker to the cluster. Returns the index of the new broker."""
+ if not name: name="%s-%d" % (self.name, len(self._brokers))
+ log.debug("Cluster %s starting member %s" % (self.name, name))
+ self._brokers.append(self.test.broker(self.args+args, name, expect, wait))
+ return self._brokers[-1]
+
+ def start_n(self, count, expect=EXPECT_RUNNING, wait=True, args=[]):
+ for i in range(count): self.start(expect=expect, wait=wait, args=args)
+
+ # Behave like a list of brokers.
+ def __len__(self): return len(self._brokers)
+ def __getitem__(self,index): return self._brokers[index]
+ def __iter__(self): return self._brokers.__iter__()
+
+class BrokerTest(TestCase):
+ """
+ Tracks processes started by test and kills at end of test.
+ Provides a well-known working directory for each test.
+ """
+
+ # Environment settings.
+ qpidd_exec = checkenv("QPIDD_EXEC")
+ cluster_lib = os.getenv("CLUSTER_LIB")
+ xml_lib = os.getenv("XML_LIB")
+ qpidConfig_exec = os.getenv("QPID_CONFIG_EXEC")
+ qpidRoute_exec = os.getenv("QPID_ROUTE_EXEC")
+ receiver_exec = os.getenv("RECEIVER_EXEC")
+ sender_exec = os.getenv("SENDER_EXEC")
+ store_lib = os.getenv("STORE_LIB")
+ test_store_lib = os.getenv("TEST_STORE_LIB")
+ rootdir = os.getcwd()
+
+ def configure(self, config): self.config=config
+
+ def setUp(self):
+ outdir = self.config.defines.get("OUTDIR") or "brokertest.tmp"
+ self.dir = os.path.join(self.rootdir, outdir, self.id())
+ os.makedirs(self.dir)
+ os.chdir(self.dir)
+ self.stopem = [] # things to stop at end of test
+
+ def tearDown(self):
+ err = []
+ for p in self.stopem:
+ try: p.stop()
+ except Exception, e: err.append(str(e))
+ if err: raise Exception("Unexpected process status:\n "+"\n ".join(err))
+
+ def cleanup_stop(self, stopable):
+ """Call thing.stop at end of test"""
+ self.stopem.append(stopable)
+
+ def popen(self, cmd, expect=EXPECT_EXIT_OK):
+ """Start a process that will be killed at end of test, in the test dir."""
+ os.chdir(self.dir)
+ p = Popen(cmd, expect)
+ self.cleanup_stop(p)
+ return p
+
+ def broker(self, args=[], name=None, expect=EXPECT_RUNNING,wait=True):
+ """Create and return a broker ready for use"""
+ b = Broker(self, args=args, name=name, expect=expect)
+ if (wait): b.connect().close()
+ return b
+
+ def cluster(self, count=0, args=[], expect=EXPECT_RUNNING, wait=True):
+ """Create and return a cluster ready for use"""
+ cluster = Cluster(self, count, args, expect=expect, wait=wait)
+ return cluster
+
+  def wait(self, brokers):
+    """Wait for the given brokers (e.g. a Cluster) to be ready"""
+    for b in brokers: b.connect().close()
+
+class RethrownException(Exception):
+ """Captures the original stack trace to be thrown later"""
+ def __init__(self, e, msg=""):
+ Exception.__init__(self, msg+"\n"+format_exc())
+
+class StoppableThread(Thread):
+ """
+ Base class for threads that do something in a loop and periodically check
+ to see if they have been stopped.
+ """
+ def __init__(self):
+ self.stopped = False
+ self.error = None
+ Thread.__init__(self)
+
+ def stop(self):
+ self.stopped = True
+ self.join()
+ if self.error: raise self.error
+
+class NumberedSender(Thread):
+ """
+ Thread to run a sender client and send numbered messages until stopped.
+ """
+
+ def __init__(self, broker, max_depth=None):
+ """
+ max_depth: enable flow control, ensure sent - received <= max_depth.
+    Requires notify_received(n) to be called as messages are received.
+ """
+ Thread.__init__(self)
+ self.sender = broker.test.popen(
+ [broker.test.sender_exec, "--port", broker.port()], expect=EXPECT_RUNNING)
+ self.condition = Condition()
+ self.max = max_depth
+ self.received = 0
+ self.stopped = False
+ self.error = None
+
+ def run(self):
+ try:
+ self.sent = 0
+ while not self.stopped:
+ if self.max:
+ self.condition.acquire()
+ while not self.stopped and self.sent - self.received > self.max:
+ self.condition.wait()
+ self.condition.release()
+ self.sender.stdin.write(str(self.sent)+"\n")
+ self.sender.stdin.flush()
+ self.sent += 1
+ except Exception, e: self.error = RethrownException(e, self.sender.pname)
+
+ def notify_received(self, count):
+ """Called by receiver to enable flow control. count = messages received so far."""
+ self.condition.acquire()
+ self.received = count
+ self.condition.notify()
+ self.condition.release()
+
+ def stop(self):
+ self.condition.acquire()
+ self.stopped = True
+ self.condition.notify()
+ self.condition.release()
+ self.join()
+ if self.error: raise self.error
+
+class NumberedReceiver(Thread):
+ """
+ Thread to run a receiver client and verify it receives
+ sequentially numbered messages.
+ """
+ def __init__(self, broker, sender = None):
+ """
+    sender: enable flow control. Calls sender.notify_received(n) for each message received.
+ """
+ Thread.__init__(self)
+ self.test = broker.test
+ self.receiver = self.test.popen(
+ [self.test.receiver_exec, "--port", broker.port()], expect=EXPECT_RUNNING)
+ self.stopat = None
+ self.lock = Lock()
+ self.error = None
+ self.sender = sender
+
+ def continue_test(self):
+ self.lock.acquire()
+ ret = self.stopat is None or self.received < self.stopat
+ self.lock.release()
+ return ret
+
+ def run(self):
+ try:
+ self.received = 0
+ while self.continue_test():
+ m = int(self.receiver.stdout.readline())
+ assert(m <= self.received) # Allow for duplicates
+ if (m == self.received):
+ self.received += 1
+ if self.sender:
+ self.sender.notify_received(self.received)
+ except Exception, e:
+ self.error = RethrownException(e, self.receiver.pname)
+
+ def stop(self, count):
+ """Returns when received >= count"""
+ self.lock.acquire()
+ self.stopat = count
+ self.lock.release()
+ self.join()
+ if self.error: raise self.error
+
+class ErrorGenerator(StoppableThread):
+ """
+ Thread that continuously generates errors by trying to consume from
+  a non-existent queue. Used in cluster regression tests, where error
+  handling has caused problems in the past.
+ """
+
+ def __init__(self, broker):
+ StoppableThread.__init__(self)
+ self.broker=broker
+ broker.test.cleanup_stop(self)
+ self.start()
+
+ def run(self):
+ c = self.broker.connect_old()
+ try:
+ while not self.stopped:
+ try:
+ c.session(str(qpid.datatypes.uuid4())).message_subscribe(
+ queue="non-existent-queue")
+ assert(False)
+ except qpid.session.SessionException: pass
+ except: pass # Normal if broker is killed.
+
+def import_script(path):
+ """
+ Import executable script at path as a module.
+ Requires some trickery as scripts are not in standard module format
+ """
+ name=os.path.split(path)[1].replace("-","_")
+ return imp.load_module(name, file(path), path, ("", "r", imp.PY_SOURCE))
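
An illustrative usage sketch (not part of this patch) of how the brokertest helpers above combine under the qpid-python-test runner; the test class, queue name and message body are invented, and QPIDD_EXEC must point at a qpidd binary:

    from qpid.brokertest import BrokerTest

    class ExampleTest(BrokerTest):
      def test_round_trip(self):
        broker = self.broker()                     # qpidd on a dynamic port, stopped in tearDown
        broker.send_message("example-q", "hello")  # uses the new messaging API internally
        assert broker.get_message("example-q").content == "hello"
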
diff --git a/python/qpid/client.py b/python/qpid/client.py
index 4605710de8..6107a4bc35 100644
--- a/python/qpid/client.py
+++ b/python/qpid/client.py
@@ -39,11 +39,8 @@ class Client:
if spec:
self.spec = spec
else:
- try:
- name = os.environ["AMQP_SPEC"]
- except KeyError:
- raise EnvironmentError("environment variable AMQP_SPEC must be set")
- self.spec = load(name)
+ from qpid_config import amqp_spec_0_9
+ self.spec = load(amqp_spec_0_9)
self.structs = StructFactory(self.spec)
self.sessions = {}
diff --git a/python/qpid/codec010.py b/python/qpid/codec010.py
index dac023e2bd..682743df19 100644
--- a/python/qpid/codec010.py
+++ b/python/qpid/codec010.py
@@ -17,25 +17,69 @@
# under the License.
#
+import datetime
from packer import Packer
-from datatypes import serial, RangedSet, Struct
+from datatypes import serial, timestamp, RangedSet, Struct, UUID
+from ops import Compound, PRIMITIVE, COMPOUND
class CodecException(Exception): pass
+def direct(t):
+ return lambda x: t
+
+def map_str(s):
+ for c in s:
+ if ord(c) >= 0x80:
+ return "vbin16"
+ return "str16"
+
class Codec(Packer):
- def __init__(self, spec):
- self.spec = spec
+ ENCODINGS = {
+ unicode: direct("str16"),
+ str: map_str,
+ buffer: direct("vbin32"),
+ int: direct("int64"),
+ long: direct("int64"),
+ float: direct("double"),
+ None.__class__: direct("void"),
+ list: direct("list"),
+ tuple: direct("list"),
+ dict: direct("map"),
+ timestamp: direct("datetime"),
+ datetime.datetime: direct("datetime"),
+ UUID: direct("uuid"),
+ Compound: direct("struct32")
+ }
+
+ def encoding(self, obj):
+ enc = self._encoding(obj.__class__, obj)
+ if enc is None:
+ raise CodecException("no encoding for %r" % obj)
+ return PRIMITIVE[enc]
+
+ def _encoding(self, klass, obj):
+ if self.ENCODINGS.has_key(klass):
+ return self.ENCODINGS[klass](obj)
+ for base in klass.__bases__:
+ result = self._encoding(base, obj)
+ if result != None:
+ return result
+
+ def read_primitive(self, type):
+ return getattr(self, "read_%s" % type.NAME)()
+ def write_primitive(self, type, v):
+ getattr(self, "write_%s" % type.NAME)(v)
- def write_void(self, v):
- assert v == None
def read_void(self):
return None
+ def write_void(self, v):
+ assert v == None
- def write_bit(self, b):
- if not b: raise ValueError(b)
def read_bit(self):
return True
+ def write_bit(self, b):
+ if not b: raise ValueError(b)
def read_uint8(self):
return self.unpack("!B")
@@ -68,7 +112,7 @@ class Codec(Packer):
def read_int16(self):
return self.unpack("!h")
def write_int16(self, n):
- return self.unpack("!h", n)
+ self.pack("!h", n)
def read_uint32(self):
@@ -103,9 +147,11 @@ class Codec(Packer):
self.pack("!q", n)
def read_datetime(self):
- return self.read_uint64()
- def write_datetime(self, n):
- self.write_uint64(n)
+ return timestamp(self.read_uint64())
+ def write_datetime(self, t):
+ if isinstance(t, datetime.datetime):
+ t = timestamp(t)
+ self.write_uint64(t)
def read_double(self):
return self.unpack("!d")
@@ -115,6 +161,8 @@ class Codec(Packer):
def read_vbin8(self):
return self.read(self.read_uint8())
def write_vbin8(self, b):
+ if isinstance(b, buffer):
+ b = str(b)
self.write_uint8(len(b))
self.write(b)
@@ -128,10 +176,17 @@ class Codec(Packer):
def write_str16(self, s):
self.write_vbin16(s.encode("utf8"))
+ def read_str16_latin(self):
+ return self.read_vbin16().decode("iso-8859-15")
+ def write_str16_latin(self, s):
+ self.write_vbin16(s.encode("iso-8859-15"))
+
def read_vbin16(self):
return self.read(self.read_uint16())
def write_vbin16(self, b):
+ if isinstance(b, buffer):
+ b = str(b)
self.write_uint16(len(b))
self.write(b)
@@ -155,23 +210,13 @@ class Codec(Packer):
def read_vbin32(self):
return self.read(self.read_uint32())
def write_vbin32(self, b):
+ if isinstance(b, buffer):
+ b = str(b)
self.write_uint32(len(b))
self.write(b)
- def write_map(self, m):
- sc = StringCodec(self.spec)
- if m is not None:
- sc.write_uint32(len(m))
- for k, v in m.items():
- type = self.spec.encoding(v.__class__)
- if type == None:
- raise CodecException("no encoding for %s" % v.__class__)
- sc.write_str8(k)
- sc.write_uint8(type.code)
- type.encode(sc, v)
- self.write_vbin32(sc.encoded)
def read_map(self):
- sc = StringCodec(self.spec, self.read_vbin32())
+ sc = StringCodec(self.read_vbin32())
if not sc.encoded:
return None
count = sc.read_uint32()
@@ -179,105 +224,146 @@ class Codec(Packer):
while sc.encoded:
k = sc.read_str8()
code = sc.read_uint8()
- type = self.spec.types[code]
- v = type.decode(sc)
+ type = PRIMITIVE[code]
+ v = sc.read_primitive(type)
result[k] = v
return result
+ def write_map(self, m):
+ sc = StringCodec()
+ if m is not None:
+ sc.write_uint32(len(m))
+ for k, v in m.items():
+ type = self.encoding(v)
+ sc.write_str8(k)
+ sc.write_uint8(type.CODE)
+ sc.write_primitive(type, v)
+ self.write_vbin32(sc.encoded)
+ def read_array(self):
+ sc = StringCodec(self.read_vbin32())
+ if not sc.encoded:
+ return None
+ type = PRIMITIVE[sc.read_uint8()]
+ count = sc.read_uint32()
+ result = []
+ while count > 0:
+ result.append(sc.read_primitive(type))
+ count -= 1
+ return result
def write_array(self, a):
- sc = StringCodec(self.spec)
+ sc = StringCodec()
if a is not None:
if len(a) > 0:
- type = self.spec.encoding(a[0].__class__)
+ type = self.encoding(a[0])
else:
- type = self.spec.encoding(None.__class__)
- sc.write_uint8(type.code)
+ type = self.encoding(None)
+ sc.write_uint8(type.CODE)
sc.write_uint32(len(a))
for o in a:
- type.encode(sc, o)
+ sc.write_primitive(type, o)
self.write_vbin32(sc.encoded)
- def read_array(self):
- sc = StringCodec(self.spec, self.read_vbin32())
+
+ def read_list(self):
+ sc = StringCodec(self.read_vbin32())
if not sc.encoded:
return None
- type = self.spec.types[sc.read_uint8()]
count = sc.read_uint32()
result = []
while count > 0:
- result.append(type.decode(sc))
+ type = PRIMITIVE[sc.read_uint8()]
+ result.append(sc.read_primitive(type))
count -= 1
return result
-
def write_list(self, l):
- sc = StringCodec(self.spec)
+ sc = StringCodec()
if l is not None:
sc.write_uint32(len(l))
for o in l:
- type = self.spec.encoding(o.__class__)
- sc.write_uint8(type.code)
- type.encode(sc, o)
+ type = self.encoding(o)
+ sc.write_uint8(type.CODE)
+ sc.write_primitive(type, o)
self.write_vbin32(sc.encoded)
- def read_list(self):
- sc = StringCodec(self.spec, self.read_vbin32())
- if not sc.encoded:
- return None
- count = sc.read_uint32()
- result = []
- while count > 0:
- type = self.spec.types[sc.read_uint8()]
- result.append(type.decode(sc))
- count -= 1
- return result
def read_struct32(self):
size = self.read_uint32()
code = self.read_uint16()
- type = self.spec.structs[code]
- fields = type.decode_fields(self)
- return Struct(type, **fields)
+ cls = COMPOUND[code]
+ op = cls()
+ self.read_fields(op)
+ return op
def write_struct32(self, value):
- sc = StringCodec(self.spec)
- sc.write_uint16(value._type.code)
- value._type.encode_fields(sc, value)
- self.write_vbin32(sc.encoded)
-
- def read_control(self):
- cntrl = self.spec.controls[self.read_uint16()]
- return Struct(cntrl, **cntrl.decode_fields(self))
- def write_control(self, ctrl):
- type = ctrl._type
- self.write_uint16(type.code)
- type.encode_fields(self, ctrl)
-
- def read_command(self):
- type = self.spec.commands[self.read_uint16()]
- hdr = self.spec["session.header"].decode(self)
- cmd = Struct(type, **type.decode_fields(self))
- return hdr, cmd
- def write_command(self, hdr, cmd):
- self.write_uint16(cmd._type.code)
- hdr._type.encode(self, hdr)
- cmd._type.encode_fields(self, cmd)
+ self.write_compound(value)
+
+ def read_compound(self, cls):
+ size = self.read_size(cls.SIZE)
+ if cls.CODE is not None:
+ code = self.read_uint16()
+ assert code == cls.CODE
+ op = cls()
+ self.read_fields(op)
+ return op
+ def write_compound(self, op):
+ sc = StringCodec()
+ if op.CODE is not None:
+ sc.write_uint16(op.CODE)
+ sc.write_fields(op)
+ self.write_size(op.SIZE, len(sc.encoded))
+ self.write(sc.encoded)
+
+ def read_fields(self, op):
+ flags = 0
+ for i in range(op.PACK):
+ flags |= (self.read_uint8() << 8*i)
+
+ for i in range(len(op.FIELDS)):
+ f = op.FIELDS[i]
+ if flags & (0x1 << i):
+ if COMPOUND.has_key(f.type):
+ value = self.read_compound(COMPOUND[f.type])
+ else:
+ value = getattr(self, "read_%s" % f.type)()
+ setattr(op, f.name, value)
+ def write_fields(self, op):
+ flags = 0
+ for i in range(len(op.FIELDS)):
+ f = op.FIELDS[i]
+ value = getattr(op, f.name)
+ if f.type == "bit":
+ present = value
+ else:
+ present = value != None
+ if present:
+ flags |= (0x1 << i)
+ for i in range(op.PACK):
+ self.write_uint8((flags >> 8*i) & 0xFF)
+ for i in range(len(op.FIELDS)):
+ f = op.FIELDS[i]
+ if flags & (0x1 << i):
+ if COMPOUND.has_key(f.type):
+ enc = self.write_compound
+ else:
+ enc = getattr(self, "write_%s" % f.type)
+ value = getattr(op, f.name)
+ enc(value)
def read_size(self, width):
if width > 0:
attr = "read_uint%d" % (width*8)
return getattr(self, attr)()
-
def write_size(self, width, n):
if width > 0:
attr = "write_uint%d" % (width*8)
getattr(self, attr)(n)
def read_uuid(self):
- return self.unpack("16s")
-
+ return UUID(self.unpack("16s"))
def write_uuid(self, s):
+ if isinstance(s, UUID):
+ s = s.bytes
self.pack("16s", s)
def read_bin128(self):
return self.unpack("16s")
-
def write_bin128(self, b):
self.pack("16s", b)
@@ -285,14 +371,13 @@ class Codec(Packer):
class StringCodec(Codec):
- def __init__(self, spec, encoded = ""):
- Codec.__init__(self, spec)
+ def __init__(self, encoded = ""):
self.encoded = encoded
- def write(self, s):
- self.encoded += s
-
def read(self, n):
result = self.encoded[:n]
self.encoded = self.encoded[n:]
return result
+
+ def write(self, s):
+ self.encoded += s
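
A hedged round-trip sketch of the spec-less codec above; it assumes the PRIMITIVE table in qpid.ops (not shown in this hunk) provides the usual 0-10 type entries that Codec.ENCODINGS points at:

    from qpid.codec010 import StringCodec

    sc = StringCodec()
    sc.write_map({"retries": 3, "label": u"example"})  # types chosen via Codec.ENCODINGS
    decoded = StringCodec(sc.encoded).read_map()
    assert decoded == {"retries": 3, "label": u"example"}
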
diff --git a/python/qpid/compat.py b/python/qpid/compat.py
index 26f60fb8aa..c2b668a5e9 100644
--- a/python/qpid/compat.py
+++ b/python/qpid/compat.py
@@ -17,6 +17,8 @@
# under the License.
#
+import sys
+
try:
set = set
except NameError:
@@ -26,3 +28,95 @@ try:
from socket import SHUT_RDWR
except ImportError:
SHUT_RDWR = 2
+
+try:
+ from traceback import format_exc
+except ImportError:
+ import traceback
+ def format_exc():
+ return "".join(traceback.format_exception(*sys.exc_info()))
+
+if tuple(sys.version_info[0:2]) < (2, 4):
+ from select import select as old_select
+ def select(rlist, wlist, xlist, timeout=None):
+ return old_select(list(rlist), list(wlist), list(xlist), timeout)
+else:
+ from select import select
+
+class BaseWaiter:
+
+ def wakeup(self):
+ self._do_write()
+
+ def wait(self, timeout=None):
+ if timeout is not None:
+ ready, _, _ = select([self], [], [], timeout)
+ else:
+ ready = True
+
+ if ready:
+ self._do_read()
+ return True
+ else:
+ return False
+
+ def reading(self):
+ return True
+
+ def readable(self):
+ self._do_read()
+
+if sys.platform in ('win32', 'cygwin'):
+ import socket
+
+ class SockWaiter(BaseWaiter):
+
+ def __init__(self, read_sock, write_sock):
+ self.read_sock = read_sock
+ self.write_sock = write_sock
+
+ def _do_write(self):
+ self.write_sock.send("\0")
+
+ def _do_read(self):
+ self.read_sock.recv(65536)
+
+ def fileno(self):
+ return self.read_sock.fileno()
+
+ def __repr__(self):
+ return "SockWaiter(%r, %r)" % (self.read_sock, self.write_sock)
+
+ def selectable_waiter():
+ listener = socket.socket()
+ listener.bind(('', 0))
+ listener.listen(1)
+ _, port = listener.getsockname()
+ write_sock = socket.socket()
+ write_sock.connect(("127.0.0.1", port))
+ read_sock, _ = listener.accept()
+ listener.close()
+ return SockWaiter(read_sock, write_sock)
+else:
+ import os
+
+ class PipeWaiter(BaseWaiter):
+
+ def __init__(self, read_fd, write_fd):
+ self.read_fd = read_fd
+ self.write_fd = write_fd
+
+ def _do_write(self):
+ os.write(self.write_fd, "\0")
+
+ def _do_read(self):
+ os.read(self.read_fd, 65536)
+
+ def fileno(self):
+ return self.read_fd
+
+ def __repr__(self):
+ return "PipeWaiter(%r, %r)" % (self.read_fd, self.write_fd)
+
+ def selectable_waiter():
+ return PipeWaiter(*os.pipe())
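
A small sketch of the waiter added above, which gives select()-based loops a way to be woken from another thread; the timeout values are arbitrary:

    from qpid.compat import selectable_waiter

    w = selectable_waiter()
    w.wakeup()              # writes a byte to the pipe/socket pair
    assert w.wait(1.0)      # readable, so wait() drains it and returns True
    assert not w.wait(0.1)  # nothing pending, so the select times out
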
diff --git a/python/qpid/concurrency.py b/python/qpid/concurrency.py
new file mode 100644
index 0000000000..9837a3f0df
--- /dev/null
+++ b/python/qpid/concurrency.py
@@ -0,0 +1,100 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import compat, inspect, time
+
+def synchronized(meth):
+ args, vargs, kwargs, defs = inspect.getargspec(meth)
+ scope = {}
+ scope["meth"] = meth
+ exec """
+def %s%s:
+ %s
+ %s._lock.acquire()
+ try:
+ return meth%s
+ finally:
+ %s._lock.release()
+""" % (meth.__name__, inspect.formatargspec(args, vargs, kwargs, defs),
+ repr(inspect.getdoc(meth)), args[0],
+ inspect.formatargspec(args, vargs, kwargs, defs,
+ formatvalue=lambda x: ""),
+ args[0]) in scope
+ return scope[meth.__name__]
+
+class Waiter(object):
+
+ def __init__(self, condition):
+ self.condition = condition
+
+ def wait(self, predicate, timeout=None):
+ passed = 0
+ start = time.time()
+ while not predicate():
+ if timeout is None:
+ # XXX: this timed wait thing is not necessary for the fast
+ # condition from this module, only for the condition impl from
+ # the threading module
+
+ # using the timed wait prevents keyboard interrupts from being
+ # blocked while waiting
+ self.condition.wait(3)
+ elif passed < timeout:
+ self.condition.wait(timeout - passed)
+ else:
+ return bool(predicate())
+ passed = time.time() - start
+ return True
+
+ def notify(self):
+ self.condition.notify()
+
+ def notifyAll(self):
+ self.condition.notifyAll()
+
+class Condition:
+
+ def __init__(self, lock):
+ self.lock = lock
+ self.waiters = []
+ self.waiting = []
+
+ def notify(self):
+ assert self.lock._is_owned()
+ if self.waiting:
+ self.waiting[0].wakeup()
+
+ def notifyAll(self):
+ assert self.lock._is_owned()
+ for w in self.waiting:
+ w.wakeup()
+
+ def wait(self, timeout=None):
+ assert self.lock._is_owned()
+ if not self.waiters:
+ self.waiters.append(compat.selectable_waiter())
+ sw = self.waiters.pop(0)
+ self.waiting.append(sw)
+ try:
+ st = self.lock._release_save()
+ sw.wait(timeout)
+ finally:
+ self.lock._acquire_restore(st)
+ self.waiting.remove(sw)
+ self.waiters.append(sw)
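
An illustrative use of the synchronized decorator above (the Counter class is invented); it assumes the decorated method's first argument carries a _lock attribute, since the generated wrapper brackets the call with self._lock.acquire()/release():

    import threading
    from qpid.concurrency import synchronized

    class Counter:
      def __init__(self):
        self._lock = threading.RLock()
        self.value = 0

      @synchronized
      def incr(self):
        """Increment the counter while holding self._lock."""
        self.value += 1
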
diff --git a/python/qpid/connection.py b/python/qpid/connection.py
index ce27a74489..18eeb99de8 100644
--- a/python/qpid/connection.py
+++ b/python/qpid/connection.py
@@ -20,15 +20,14 @@
import datatypes, session
from threading import Thread, Condition, RLock
from util import wait, notify
-from assembler import Assembler, Segment
from codec010 import StringCodec
+from framing import *
from session import Session
-from invoker import Invoker
-from spec010 import Control, Command, load
-from spec import default
+from generator import control_invoker
+from spec import SPEC
from exceptions import *
from logging import getLogger
-import delegates
+import delegates, socket
class ChannelBusy(Exception): pass
@@ -44,28 +43,33 @@ def client(*args, **kwargs):
def server(*args, **kwargs):
return delegates.Server(*args, **kwargs)
-class Connection(Assembler):
+from framer import Framer
- def __init__(self, sock, spec=None, delegate=client, **args):
- Assembler.__init__(self, sock)
- if spec == None:
- spec = load(default())
- self.spec = spec
- self.track = self.spec["track"]
+class Connection(Framer):
+ def __init__(self, sock, delegate=client, **args):
+ Framer.__init__(self, sock)
self.lock = RLock()
self.attached = {}
self.sessions = {}
self.condition = Condition()
+ # XXX: we should combine this into a single comprehensive state
+ # model (whatever that means)
self.opened = False
self.failed = False
+ self.closed = False
self.close_code = (None, "connection aborted")
self.thread = Thread(target=self.run)
self.thread.setDaemon(True)
self.channel_max = 65535
+ self.user_id = None
+
+ self.op_enc = OpEncoder()
+ self.seg_enc = SegmentEncoder()
+ self.frame_enc = FrameEncoder()
self.delegate = delegate(self, **args)
@@ -79,7 +83,7 @@ class Connection(Assembler):
else:
ssn = self.sessions.get(name)
if ssn is None:
- ssn = Session(name, self.spec, delegate=delegate)
+ ssn = Session(name, delegate=delegate)
self.sessions[name] = ssn
elif ssn.channel is not None:
if force:
@@ -107,8 +111,7 @@ class Connection(Assembler):
self.lock.release()
def __channel(self):
- # XXX: ch 0?
- for i in xrange(self.channel_max):
+ for i in xrange(1, self.channel_max):
if not self.attached.has_key(i):
return i
else:
@@ -147,15 +150,45 @@ class Connection(Assembler):
raise ConnectionFailed(*self.close_code)
def run(self):
- # XXX: we don't really have a good way to exit this loop without
- # getting the other end to kill the socket
- while True:
+ frame_dec = FrameDecoder()
+ seg_dec = SegmentDecoder()
+ op_dec = OpDecoder()
+
+ while not self.closed:
try:
- seg = self.read_segment()
- except Closed:
+ data = self.sock.recv(64*1024)
+ if self.security_layer_rx and data:
+ status, data = self.security_layer_rx.decode(data)
+ if not data:
+ self.detach_all()
+ break
+ except socket.timeout:
+ if self.aborted():
+ self.detach_all()
+ raise Closed("connection timed out")
+ else:
+ continue
+ except socket.error, e:
self.detach_all()
- break
- self.delegate.received(seg)
+ raise Closed(e)
+ frame_dec.write(data)
+ seg_dec.write(*frame_dec.read())
+ op_dec.write(*seg_dec.read())
+ for op in op_dec.read():
+ self.delegate.received(op)
+ self.sock.close()
+
+ def write_op(self, op):
+ self.sock_lock.acquire()
+ try:
+ self.op_enc.write(op)
+ self.seg_enc.write(*self.op_enc.read())
+ self.frame_enc.write(*self.seg_enc.read())
+ bytes = self.frame_enc.read()
+ self.write(bytes)
+ self.flush()
+ finally:
+ self.sock_lock.release()
def close(self, timeout=None):
if not self.opened: return
@@ -172,26 +205,17 @@ class Connection(Assembler):
log = getLogger("qpid.io.ctl")
-class Channel(Invoker):
+class Channel(control_invoker()):
def __init__(self, connection, id):
self.connection = connection
self.id = id
self.session = None
- def resolve_method(self, name):
- inst = self.connection.spec.instructions.get(name)
- if inst is not None and isinstance(inst, Control):
- return self.METHOD, inst
- else:
- return self.ERROR, None
-
- def invoke(self, type, args, kwargs):
- ctl = type.new(args, kwargs)
- sc = StringCodec(self.connection.spec)
- sc.write_control(ctl)
- self.connection.write_segment(Segment(True, True, type.segment_type,
- type.track, self.id, sc.encoded))
+ def invoke(self, op, args, kwargs):
+ ctl = op(*args, **kwargs)
+ ctl.channel = self.id
+ self.connection.write_op(ctl)
log.debug("SENT %s", ctl)
def __str__(self):
diff --git a/python/qpid/connection08.py b/python/qpid/connection08.py
index 8f2eef4770..d34cfe2847 100644
--- a/python/qpid/connection08.py
+++ b/python/qpid/connection08.py
@@ -28,6 +28,7 @@ from cStringIO import StringIO
from spec import load
from codec import EOF
from compat import SHUT_RDWR
+from exceptions import VersionError
class SockIO:
@@ -73,6 +74,9 @@ def listen(host, port, predicate = lambda: True):
s, a = sock.accept()
yield SockIO(s)
+class FramingError(Exception):
+ pass
+
class Connection:
def __init__(self, io, spec):
@@ -107,7 +111,16 @@ class Connection:
def read_8_0(self):
c = self.codec
- type = self.spec.constants.byid[c.decode_octet()].name
+ tid = c.decode_octet()
+ try:
+ type = self.spec.constants.byid[tid].name
+ except KeyError:
+ if tid == ord('A') and c.unpack("!3s") == "MQP":
+ _, _, major, minor = c.unpack("4B")
+ raise VersionError("client: %s-%s, server: %s-%s" %
+ (self.spec.major, self.spec.minor, major, minor))
+ else:
+ raise FramingError("unknown frame type: %s" % tid)
channel = c.decode_short()
body = c.decode_longstr()
dec = codec.Codec(StringIO(body), self.spec)
@@ -122,6 +135,12 @@ class Connection:
raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage)
return frame
+ def write_0_9(self, frame):
+ self.write_8_0(frame)
+
+ def read_0_9(self):
+ return self.read_8_0()
+
def write_0_10(self, frame):
c = self.codec
flags = 0
diff --git a/python/qpid/datatypes.py b/python/qpid/datatypes.py
index 7150caded2..61643715e4 100644
--- a/python/qpid/datatypes.py
+++ b/python/qpid/datatypes.py
@@ -17,7 +17,8 @@
# under the License.
#
-import threading, struct
+import threading, struct, datetime, time
+from exceptions import Timeout
class Struct:
@@ -83,7 +84,7 @@ class Message:
def get(self, name):
if self.headers:
for h in self.headers:
- if h._type.name == name:
+ if h.NAME == name:
return h
return None
@@ -92,7 +93,7 @@ class Message:
self.headers = []
idx = 0
while idx < len(self.headers):
- if self.headers[idx]._type == header._type:
+ if self.headers[idx].NAME == header.NAME:
self.headers[idx] = header
return
idx += 1
@@ -101,7 +102,7 @@ class Message:
def clear(self, name):
idx = 0
while idx < len(self.headers):
- if self.headers[idx]._type.name == name:
+ if self.headers[idx].NAME == name:
del self.headers[idx]
return
idx += 1
@@ -125,19 +126,19 @@ def serial(o):
class Serial:
def __init__(self, value):
- self.value = value & 0xFFFFFFFF
+ self.value = value & 0xFFFFFFFFL
def __hash__(self):
return hash(self.value)
def __cmp__(self, other):
- if other is None:
+ if other.__class__ not in (int, long, Serial):
return 1
other = serial(other)
- delta = (self.value - other.value) & 0xFFFFFFFF
- neg = delta & 0x80000000
+ delta = (self.value - other.value) & 0xFFFFFFFFL
+ neg = delta & 0x80000000L
mag = delta & 0x7FFFFFFF
if neg:
@@ -149,7 +150,10 @@ class Serial:
return Serial(self.value + other)
def __sub__(self, other):
- return Serial(self.value - other)
+ if isinstance(other, Serial):
+ return self.value - other.value
+ else:
+ return Serial(self.value - other)
def __repr__(self):
return "serial(%s)" % self.value
@@ -168,7 +172,7 @@ class Range:
def __contains__(self, n):
return self.lower <= n and n <= self.upper
-
+
def __iter__(self):
i = self.lower
while i <= self.upper:
@@ -229,7 +233,25 @@ class RangedSet:
def add(self, lower, upper = None):
self.add_range(Range(lower, upper))
-
+
+ def empty(self):
+ for r in self.ranges:
+ if r.lower <= r.upper:
+ return False
+ return True
+
+ def max(self):
+ if self.ranges:
+ return self.ranges[-1].upper
+ else:
+ return None
+
+ def min(self):
+ if self.ranges:
+ return self.ranges[0].lower
+ else:
+ return None
+
def __iter__(self):
return iter(self.ranges)
@@ -253,9 +275,12 @@ class Future:
def get(self, timeout=None):
self._set.wait(timeout)
- if self._error != None:
- raise self.exception(self._error)
- return self.value
+ if self._set.isSet():
+ if self._error != None:
+ raise self.exception(self._error)
+ return self.value
+ else:
+ raise Timeout()
def is_set(self):
return self._set.isSet()
@@ -289,10 +314,62 @@ class UUID:
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.bytes, other.bytes)
- raise NotImplemented()
+ else:
+ return -1
def __str__(self):
return "%08x-%04x-%04x-%04x-%04x%08x" % struct.unpack("!LHHHHL", self.bytes)
def __repr__(self):
return "UUID(%r)" % str(self)
+
+ def __hash__(self):
+ return self.bytes.__hash__()
+
+class timestamp(float):
+
+ def __new__(cls, obj=None):
+ if obj is None:
+ obj = time.time()
+ elif isinstance(obj, datetime.datetime):
+ obj = time.mktime(obj.timetuple()) + 1e-6 * obj.microsecond
+ return super(timestamp, cls).__new__(cls, obj)
+
+ def datetime(self):
+ return datetime.datetime.fromtimestamp(self)
+
+ def __add__(self, other):
+ if isinstance(other, datetime.timedelta):
+ return timestamp(self.datetime() + other)
+ else:
+ return timestamp(float(self) + other)
+
+ def __sub__(self, other):
+ if isinstance(other, datetime.timedelta):
+ return timestamp(self.datetime() - other)
+ else:
+ return timestamp(float(self) - other)
+
+ def __radd__(self, other):
+ if isinstance(other, datetime.timedelta):
+ return timestamp(self.datetime() + other)
+ else:
+ return timestamp(other + float(self))
+
+ def __rsub__(self, other):
+ if isinstance(other, datetime.timedelta):
+ return timestamp(self.datetime() - other)
+ else:
+ return timestamp(other - float(self))
+
+ def __neg__(self):
+ return timestamp(-float(self))
+
+ def __pos__(self):
+ return self
+
+ def __abs__(self):
+ return timestamp(abs(float(self)))
+
+ def __repr__(self):
+ return "timestamp(%r)" % float(self)
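
A brief sketch of the timestamp arithmetic and serial wrap-around above; the five-second delta is arbitrary, the tolerance allows for the round trip through datetime/mktime, and the comparisons rely on Serial.__cmp__'s wrap-around branch:

    import datetime
    from qpid.datatypes import timestamp, serial

    now = timestamp()                            # seconds since the epoch, as a float subclass
    later = now + datetime.timedelta(seconds=5)  # goes via datetime and back
    assert abs(float(later) - float(now) - 5.0) < 1.0

    assert serial(0xFFFFFFFF) + 1 == serial(0)   # 32-bit serial numbers wrap
    assert serial(0xFFFFFFFF) < serial(0)        # and compare with wrap-around semantics
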
diff --git a/python/qpid/debug.py b/python/qpid/debug.py
new file mode 100644
index 0000000000..b5dbd4d9d9
--- /dev/null
+++ b/python/qpid/debug.py
@@ -0,0 +1,55 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import threading, traceback, signal, sys, time
+
+def stackdump(sig, frm):
+ code = []
+ for threadId, stack in sys._current_frames().items():
+ code.append("\n# ThreadID: %s" % threadId)
+ for filename, lineno, name, line in traceback.extract_stack(stack):
+ code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
+ if line:
+ code.append(" %s" % (line.strip()))
+ print "\n".join(code)
+
+signal.signal(signal.SIGQUIT, stackdump)
+
+class LoudLock:
+
+ def __init__(self):
+ self.lock = threading.RLock()
+
+ def acquire(self, blocking=1):
+ while not self.lock.acquire(blocking=0):
+ time.sleep(1)
+      print >> sys.stdout, "TRYING"
+      traceback.print_stack(None, None, sys.stdout)
+      print >> sys.stdout, "TRYING"
+    print >> sys.stdout, "ACQUIRED"
+    traceback.print_stack(None, None, sys.stdout)
+    print >> sys.stdout, "ACQUIRED"
+ return True
+
+ def _is_owned(self):
+ return self.lock._is_owned()
+
+ def release(self):
+ self.lock.release()
+
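
For illustration only: importing qpid.debug installs the SIGQUIT handler above as a side effect (POSIX platforms only), so a stack dump can be provoked from inside the process:

    import os, signal
    import qpid.debug                      # registers stackdump on SIGQUIT

    os.kill(os.getpid(), signal.SIGQUIT)   # prints the stack of every thread
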
diff --git a/python/qpid/delegates.py b/python/qpid/delegates.py
index bf26553dda..4c41a6241f 100644
--- a/python/qpid/delegates.py
+++ b/python/qpid/delegates.py
@@ -20,7 +20,17 @@
import os, connection, session
from util import notify
from datatypes import RangedSet
+from exceptions import VersionError, Closed
from logging import getLogger
+from ops import Control
+import sys
+
+_have_sasl = None
+try:
+ import saslwrapper
+ _have_sasl = True
+except:
+ pass
log = getLogger("qpid.io.ctl")
@@ -28,26 +38,22 @@ class Delegate:
def __init__(self, connection, delegate=session.client):
self.connection = connection
- self.spec = connection.spec
self.delegate = delegate
- self.control = self.spec["track.control"].value
- def received(self, seg):
- ssn = self.connection.attached.get(seg.channel)
+ def received(self, op):
+ ssn = self.connection.attached.get(op.channel)
if ssn is None:
- ch = connection.Channel(self.connection, seg.channel)
+ ch = connection.Channel(self.connection, op.channel)
else:
ch = ssn.channel
- if seg.track == self.control:
- ctl = seg.decode(self.spec)
- log.debug("RECV %s", ctl)
- attr = ctl._type.qname.replace(".", "_")
- getattr(self, attr)(ch, ctl)
+ if isinstance(op, Control):
+ log.debug("RECV %s", op)
+ getattr(self, op.NAME)(ch, op)
elif ssn is None:
ch.session_detached()
else:
- ssn.received(seg)
+ ssn.received(op)
def connection_close(self, ch, close):
self.connection.close_code = (close.reply_code, close.reply_text)
@@ -59,8 +65,12 @@ class Delegate:
def connection_close_ok(self, ch, close_ok):
self.connection.opened = False
+ self.connection.closed = True
notify(self.connection.condition)
+ def connection_heartbeat(self, ch, hrt):
+ pass
+
def session_attach(self, ch, a):
try:
self.connection.attach(a.name, ch, self.delegate, a.force)
@@ -119,7 +129,8 @@ class Server(Delegate):
def start(self):
self.connection.read_header()
- self.connection.write_header(self.spec.major, self.spec.minor)
+ # XXX
+ self.connection.write_header(0, 10)
connection.Channel(self.connection, 0).connection_start(mechanisms=["ANONYMOUS"])
def connection_start_ok(self, ch, start_ok):
@@ -135,28 +146,101 @@ class Server(Delegate):
class Client(Delegate):
+ ppid = 0
+ try:
+ ppid = os.getppid()
+ except:
+ pass
+
PROPERTIES = {"product": "qpid python client",
"version": "development",
- "platform": os.name}
+ "platform": os.name,
+ "qpid.client_process": os.path.basename(sys.argv[0]),
+ "qpid.client_pid": os.getpid(),
+ "qpid.client_ppid": ppid}
- def __init__(self, connection, username="guest", password="guest", mechanism="PLAIN"):
+ def __init__(self, connection, username=None, password=None,
+ mechanism=None, heartbeat=None, **kwargs):
Delegate.__init__(self, connection)
- self.username = username
- self.password = password
- self.mechanism = mechanism
+
+ ##
+ ## self.acceptableMechanisms is the list of SASL mechanisms that the client is willing to
+ ## use. If it's None, then any mechanism is acceptable.
+ ##
+ self.acceptableMechanisms = None
+ if mechanism:
+ self.acceptableMechanisms = mechanism.split(" ")
+ self.heartbeat = heartbeat
+ self.username = username
+ self.password = password
+
+ if _have_sasl:
+ self.sasl = saslwrapper.Client()
+ if username and len(username) > 0:
+ self.sasl.setAttr("username", str(username))
+ if password and len(password) > 0:
+ self.sasl.setAttr("password", str(password))
+ if "service" in kwargs:
+ self.sasl.setAttr("service", str(kwargs["service"]))
+ if "host" in kwargs:
+ self.sasl.setAttr("host", str(kwargs["host"]))
+ if "min_ssf" in kwargs:
+ self.sasl.setAttr("minssf", kwargs["min_ssf"])
+ if "max_ssf" in kwargs:
+ self.sasl.setAttr("maxssf", kwargs["max_ssf"])
+ self.sasl.init()
def start(self):
- self.connection.write_header(self.spec.major, self.spec.minor)
- self.connection.read_header()
+ # XXX
+ cli_major = 0
+ cli_minor = 10
+ self.connection.write_header(cli_major, cli_minor)
+ magic, _, _, major, minor = self.connection.read_header()
+ if not (magic == "AMQP" and major == cli_major and minor == cli_minor):
+ raise VersionError("client: %s-%s, server: %s-%s" %
+ (cli_major, cli_minor, major, minor))
def connection_start(self, ch, start):
- r = "\0%s\0%s" % (self.username, self.password)
- ch.connection_start_ok(client_properties=Client.PROPERTIES, mechanism=self.mechanism, response=r)
+ mech_list = ""
+ for mech in start.mechanisms:
+ if (not self.acceptableMechanisms) or mech in self.acceptableMechanisms:
+ mech_list += str(mech) + " "
+ mech = None
+ initial = None
+ if _have_sasl:
+ status, mech, initial = self.sasl.start(mech_list)
+ if status == False:
+ raise Closed("SASL error: %s" % self.sasl.getError())
+ else:
+ if self.username and self.password and ("PLAIN" in mech_list):
+ mech = "PLAIN"
+ initial = "\0%s\0%s" % (self.username, self.password)
+ else:
+ mech = "ANONYMOUS"
+ if not mech in mech_list:
+ raise Closed("No acceptable SASL authentication mechanism available")
+ ch.connection_start_ok(client_properties=Client.PROPERTIES, mechanism=mech, response=initial)
+
+ def connection_secure(self, ch, secure):
+ resp = None
+ if _have_sasl:
+ status, resp = self.sasl.step(secure.challenge)
+ if status == False:
+ raise Closed("SASL error: %s" % self.sasl.getError())
+ ch.connection_secure_ok(response=resp)
def connection_tune(self, ch, tune):
- ch.connection_tune_ok()
+ ch.connection_tune_ok(heartbeat=self.heartbeat)
ch.connection_open()
+ if _have_sasl:
+ self.connection.user_id = self.sasl.getUserId()
+ self.connection.security_layer_tx = self.sasl
def connection_open_ok(self, ch, open_ok):
+ if _have_sasl:
+ self.connection.security_layer_rx = self.sasl
self.connection.opened = True
notify(self.connection.condition)
+
+ def connection_heartbeat(self, ch, hrt):
+ ch.connection_heartbeat()
diff --git a/python/qpid/disp.py b/python/qpid/disp.py
index d697cd0136..1b315c9d98 100644
--- a/python/qpid/disp.py
+++ b/python/qpid/disp.py
@@ -21,16 +21,115 @@
from time import strftime, gmtime
+class Header:
+ """ """
+ NONE = 1
+ KMG = 2
+ YN = 3
+ Y = 4
+ TIME_LONG = 5
+ TIME_SHORT = 6
+ DURATION = 7
+
+ def __init__(self, text, format=NONE):
+ self.text = text
+ self.format = format
+
+ def __repr__(self):
+ return self.text
+
+ def __str__(self):
+ return self.text
+
+ def formatted(self, value):
+ try:
+ if value == None:
+ return ''
+ if self.format == Header.NONE:
+ return value
+ if self.format == Header.KMG:
+ return self.num(value)
+ if self.format == Header.YN:
+ if value:
+ return 'Y'
+ return 'N'
+ if self.format == Header.Y:
+ if value:
+ return 'Y'
+ return ''
+ if self.format == Header.TIME_LONG:
+ return strftime("%c", gmtime(value / 1000000000))
+ if self.format == Header.TIME_SHORT:
+ return strftime("%X", gmtime(value / 1000000000))
+ if self.format == Header.DURATION:
+ if value < 0: value = 0
+ sec = value / 1000000000
+ min = sec / 60
+ hour = min / 60
+ day = hour / 24
+ result = ""
+ if day > 0:
+ result = "%dd " % day
+ if hour > 0 or result != "":
+ result += "%dh " % (hour % 24)
+ if min > 0 or result != "":
+ result += "%dm " % (min % 60)
+ result += "%ds" % (sec % 60)
+ return result
+ except:
+ return "?"
+
+ def numCell(self, value, tag):
+ fp = float(value) / 1000.
+ if fp < 10.0:
+ return "%1.2f%c" % (fp, tag)
+ if fp < 100.0:
+ return "%2.1f%c" % (fp, tag)
+ return "%4d%c" % (value / 1000, tag)
+
+ def num(self, value):
+ if value < 1000:
+ return "%4d" % value
+ if value < 1000000:
+ return self.numCell(value, 'k')
+ value /= 1000
+ if value < 1000000:
+ return self.numCell(value, 'm')
+ value /= 1000
+ return self.numCell(value, 'g')
+
+
class Display:
""" Display formatting for QPID Management CLI """
- def __init__ (self):
- self.tableSpacing = 2
- self.tablePrefix = " "
+ def __init__(self, spacing=2, prefix=" "):
+ self.tableSpacing = spacing
+ self.tablePrefix = prefix
self.timestampFormat = "%X"
- def table (self, title, heads, rows):
- """ Print a formatted table with autosized columns """
+ def formattedTable(self, title, heads, rows):
+ fRows = []
+ for row in rows:
+ fRow = []
+ col = 0
+ for cell in row:
+ fRow.append(heads[col].formatted(cell))
+ col += 1
+ fRows.append(fRow)
+ headtext = []
+ for head in heads:
+ headtext.append(head.text)
+ self.table(title, headtext, fRows)
+
+ def table(self, title, heads, rows):
+ """ Print a table with autosized columns """
+
+ # Pad the rows to the number of heads
+ for row in rows:
+ diff = len(heads) - len(row)
+ for idx in range(diff):
+ row.append("")
+
print title
if len (rows) == 0:
return
@@ -40,7 +139,7 @@ class Display:
for head in heads:
width = len (head)
for row in rows:
- cellWidth = len (str (row[col]))
+ cellWidth = len (unicode (row[col]))
if cellWidth > width:
width = cellWidth
colWidth.append (width + self.tableSpacing)
@@ -60,9 +159,9 @@ class Display:
line = self.tablePrefix
col = 0
for width in colWidth:
- line = line + str (row[col])
+ line = line + unicode (row[col])
if col < len (heads) - 1:
- for i in range (width - len (str (row[col]))):
+ for i in range (width - len (unicode (row[col]))):
line = line + " "
col = col + 1
print line
@@ -77,3 +176,59 @@ class Display:
def timestamp (self, nsec):
""" Format a nanosecond-since-the-epoch timestamp for printing """
return strftime (self.timestampFormat, gmtime (nsec / 1000000000))
+
+ def duration(self, nsec):
+ if nsec < 0: nsec = 0
+ sec = nsec / 1000000000
+ min = sec / 60
+ hour = min / 60
+ day = hour / 24
+ result = ""
+ if day > 0:
+ result = "%dd " % day
+ if hour > 0 or result != "":
+ result += "%dh " % (hour % 24)
+ if min > 0 or result != "":
+ result += "%dm " % (min % 60)
+ result += "%ds" % (sec % 60)
+ return result
+
+class Sortable:
+ """ """
+ def __init__(self, row, sortIndex):
+ self.row = row
+ self.sortIndex = sortIndex
+ if sortIndex >= len(row):
+ raise Exception("sort index exceeds row boundary")
+
+ def __cmp__(self, other):
+ return cmp(self.row[self.sortIndex], other.row[self.sortIndex])
+
+ def getRow(self):
+ return self.row
+
+class Sorter:
+ """ """
+ def __init__(self, heads, rows, sortCol, limit=0, inc=True):
+ col = 0
+ for head in heads:
+ if head.text == sortCol:
+ break
+ col += 1
+ if col == len(heads):
+ raise Exception("sortCol '%s', not found in headers" % sortCol)
+
+ list = []
+ for row in rows:
+ list.append(Sortable(row, col))
+ list.sort(reverse=not inc)
+ count = 0
+ self.sorted = []
+ for row in list:
+ self.sorted.append(row.getRow())
+ count += 1
+ if count == limit:
+ break
+
+ def getSorted(self):
+ return self.sorted
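
A quick sketch of the Header formats and Display.formattedTable added above; the column names and row values are invented:

    from qpid.disp import Display, Header

    heads = [Header("queue"), Header("depth", Header.KMG), Header("durable", Header.YN)]
    rows = [["q1", 12345, True], ["q2", 17, False]]
    Display().formattedTable("Queues", heads, rows)
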
diff --git a/python/qpid/driver.py b/python/qpid/driver.py
new file mode 100644
index 0000000000..2851c3aad3
--- /dev/null
+++ b/python/qpid/driver.py
@@ -0,0 +1,859 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import address, compat, connection, os, socket, struct, sys, time
+from concurrency import synchronized
+from datatypes import RangedSet, Serial
+from exceptions import Timeout, VersionError
+from framing import OpEncoder, SegmentEncoder, FrameEncoder, FrameDecoder, SegmentDecoder, OpDecoder
+from logging import getLogger
+from messaging import get_codec, ConnectError, Message, Pattern, UNLIMITED
+from ops import *
+from selector import Selector
+from threading import Condition, Thread
+from util import connect
+
+log = getLogger("qpid.messaging")
+rawlog = getLogger("qpid.messaging.io.raw")
+opslog = getLogger("qpid.messaging.io.ops")
+
+def addr2reply_to(addr):
+ name, subject, options = address.parse(addr)
+ return ReplyTo(name, subject)
+
+def reply_to2addr(reply_to):
+ if reply_to.routing_key is None:
+ return reply_to.exchange
+ elif reply_to.exchange in (None, ""):
+ return reply_to.routing_key
+ else:
+ return "%s/%s" % (reply_to.exchange, reply_to.routing_key)
+
+class Attachment:
+
+ def __init__(self, target):
+ self.target = target
+
+# XXX
+
+DURABLE_DEFAULT=True
+
+# XXX
+
+FILTER_DEFAULTS = {
+ "topic": Pattern("*")
+ }
+
+# XXX
+ppid = 0
+try:
+ ppid = os.getppid()
+except:
+ pass
+
+CLIENT_PROPERTIES = {"product": "qpid python client",
+ "version": "development",
+ "platform": os.name,
+ "qpid.client_process": os.path.basename(sys.argv[0]),
+ "qpid.client_pid": os.getpid(),
+ "qpid.client_ppid": ppid}
+
+def noop(): pass
+
+class SessionState:
+
+ def __init__(self, driver, session, name, channel):
+ self.driver = driver
+ self.session = session
+ self.name = name
+ self.channel = channel
+ self.detached = False
+ self.committing = False
+ self.aborting = False
+
+ # sender state
+ self.sent = Serial(0)
+ self.acknowledged = RangedSet()
+ self.actions = {}
+ self.min_completion = self.sent
+ self.max_completion = self.sent
+ self.results = {}
+
+ # receiver state
+ self.received = None
+ self.executed = RangedSet()
+
+ # XXX: need to periodically exchange completion/known_completion
+
+ def write_query(self, query, handler):
+ id = self.sent
+ self.write_cmd(query, lambda: handler(self.results.pop(id)))
+
+ def write_cmd(self, cmd, action=noop):
+ if action != noop:
+ cmd.sync = True
+ if self.detached:
+ raise Exception("detached")
+ cmd.id = self.sent
+ self.sent += 1
+ self.actions[cmd.id] = action
+ self.max_completion = cmd.id
+ self.write_op(cmd)
+
+ def write_op(self, op):
+ op.channel = self.channel
+ self.driver.write_op(op)
+
+# XXX
+HEADER="!4s4B"
+
+EMPTY_DP = DeliveryProperties()
+EMPTY_MP = MessageProperties()
+
+SUBJECT = "qpid.subject"
+TO = "qpid.to"
+
+class Driver:
+
+ def __init__(self, connection):
+ self.connection = connection
+ self._lock = self.connection._lock
+
+ self._selector = Selector.default()
+ self.reset()
+
+ def reset(self):
+ self._opening = False
+ self._closing = False
+ self._connected = False
+ self._attachments = {}
+
+ self._channel_max = 65536
+ self._channels = 0
+ self._sessions = {}
+
+ self._socket = None
+ self._buf = ""
+ self._hdr = ""
+ self._op_enc = OpEncoder()
+ self._seg_enc = SegmentEncoder()
+ self._frame_enc = FrameEncoder()
+ self._frame_dec = FrameDecoder()
+ self._seg_dec = SegmentDecoder()
+ self._op_dec = OpDecoder()
+ self._timeout = None
+
+ for ssn in self.connection.sessions.values():
+ for m in ssn.acked + ssn.unacked + ssn.incoming:
+ m._transfer_id = None
+ for snd in ssn.senders:
+ snd.linked = False
+ for rcv in ssn.receivers:
+ rcv.impending = rcv.received
+ rcv.linked = False
+
+ @synchronized
+ def wakeup(self):
+ self.dispatch()
+ self._selector.wakeup()
+
+ def start(self):
+ self._selector.register(self)
+
+ def fileno(self):
+ return self._socket.fileno()
+
+ @synchronized
+ def reading(self):
+ return self._socket is not None
+
+ @synchronized
+ def writing(self):
+ return self._socket is not None and self._buf
+
+ @synchronized
+ def timing(self):
+ return self._timeout
+
+ @synchronized
+ def readable(self):
+ error = None
+ recoverable = False
+ try:
+ data = self._socket.recv(64*1024)
+ if data:
+ rawlog.debug("READ: %r", data)
+ else:
+ rawlog.debug("ABORTED: %s", self._socket.getpeername())
+ error = "connection aborted"
+ recoverable = True
+ except socket.error, e:
+ error = e
+ recoverable = True
+
+ if not error:
+ try:
+ if len(self._hdr) < 8:
+ r = 8 - len(self._hdr)
+ self._hdr += data[:r]
+ data = data[r:]
+
+ if len(self._hdr) == 8:
+ self.do_header(self._hdr)
+
+ self._frame_dec.write(data)
+ self._seg_dec.write(*self._frame_dec.read())
+ self._op_dec.write(*self._seg_dec.read())
+ for op in self._op_dec.read():
+ self.assign_id(op)
+ opslog.debug("RCVD: %r", op)
+ op.dispatch(self)
+ except VersionError, e:
+ error = e
+ except:
+ msg = compat.format_exc()
+ error = msg
+
+ if error:
+ self._error(error, recoverable)
+ else:
+ self.dispatch()
+
+ self.connection._waiter.notifyAll()
+
+ def assign_id(self, op):
+ if isinstance(op, Command):
+ sst = self.get_sst(op)
+ op.id = sst.received
+ sst.received += 1
+
+ @synchronized
+ def writeable(self):
+ try:
+ n = self._socket.send(self._buf)
+ rawlog.debug("SENT: %r", self._buf[:n])
+ self._buf = self._buf[n:]
+ except socket.error, e:
+ self._error(e, True)
+ self.connection._waiter.notifyAll()
+
+ @synchronized
+ def timeout(self):
+ log.warn("retrying ...")
+ self.dispatch()
+ self.connection._waiter.notifyAll()
+
+ def _error(self, err, recoverable):
+ if self._socket is not None:
+ self._socket.close()
+ self.reset()
+ if recoverable and self.connection.reconnect:
+ self._timeout = time.time() + 3
+ log.warn("recoverable error: %s" % err)
+ log.warn("sleeping 3 seconds")
+ else:
+ self.connection.error = (err,)
+
+ def write_op(self, op):
+ opslog.debug("SENT: %r", op)
+ self._op_enc.write(op)
+ self._seg_enc.write(*self._op_enc.read())
+ self._frame_enc.write(*self._seg_enc.read())
+ self._buf += self._frame_enc.read()
+
+ def do_header(self, hdr):
+ cli_major = 0; cli_minor = 10
+ magic, _, _, major, minor = struct.unpack(HEADER, hdr)
+ if major != cli_major or minor != cli_minor:
+ raise VersionError("client: %s-%s, server: %s-%s" %
+ (cli_major, cli_minor, major, minor))
+
+ def do_connection_start(self, start):
+ # XXX: should we use some sort of callback for this?
+ r = "\0%s\0%s" % (self.connection.username, self.connection.password)
+ m = self.connection.mechanism
+ self.write_op(ConnectionStartOk(client_properties=CLIENT_PROPERTIES,
+ mechanism=m, response=r))
+
+ def do_connection_tune(self, tune):
+ # XXX: is heartbeat protocol specific?
+ if tune.channel_max is not None:
+ self.channel_max = tune.channel_max
+ self.write_op(ConnectionTuneOk(heartbeat=self.connection.heartbeat,
+ channel_max=self.channel_max))
+ self.write_op(ConnectionOpen())
+
+ def do_connection_open_ok(self, open_ok):
+ self._connected = True
+
+ def connection_heartbeat(self, hrt):
+ self.write_op(ConnectionHeartbeat())
+
+ def do_connection_close(self, close):
+ self.write_op(ConnectionCloseOk())
+ if close.reply_code != close_code.normal:
+ self.connection.error = (close.reply_code, close.reply_text)
+ # XXX: should we do a half shutdown on the socket here?
+ # XXX: this really needs testing; we may end up reporting a
+ # connection abort after this. If we did a shutdown on read and
+ # stopped reading, we wouldn't report the abort, which is
+ # probably the right thing to do.
+
+ def do_connection_close_ok(self, close_ok):
+ self._socket.close()
+ self.reset()
+
+ def do_session_attached(self, atc):
+ pass
+
+ def do_session_command_point(self, cp):
+ sst = self.get_sst(cp)
+ sst.received = cp.command_id
+
+ def do_session_completed(self, sc):
+ sst = self.get_sst(sc)
+ for r in sc.commands:
+ sst.acknowledged.add(r.lower, r.upper)
+
+ if not sc.commands.empty():
+ while sst.min_completion in sc.commands:
+ if sst.actions.has_key(sst.min_completion):
+ sst.actions.pop(sst.min_completion)()
+ sst.min_completion += 1
+
+ def session_known_completed(self, kcmp):
+ sst = self.get_sst(kcmp)
+ executed = RangedSet()
+ for e in sst.executed.ranges:
+ for ke in kcmp.ranges:
+ if e.lower in ke and e.upper in ke:
+ break
+ else:
+ executed.add_range(e)
+ sst.executed = executed
+
+ def do_session_flush(self, sf):
+ sst = self.get_sst(sf)
+ if sf.expected:
+ if sst.received is None:
+ exp = None
+ else:
+ exp = RangedSet(sst.received)
+ sst.write_op(SessionExpected(exp))
+ if sf.confirmed:
+ sst.write_op(SessionConfirmed(sst.executed))
+ if sf.completed:
+ sst.write_op(SessionCompleted(sst.executed))
+
+ def do_execution_result(self, er):
+ sst = self.get_sst(er)
+ sst.results[er.command_id] = er.value
+
+ def do_execution_exception(self, ex):
+ sst = self.get_sst(ex)
+ sst.session.error = (ex,)
+
+ def dispatch(self):
+ try:
+ if self._socket is None and self.connection._connected and not self._opening:
+ self.connect()
+ elif self._socket is not None and not self.connection._connected and not self._closing:
+ self.disconnect()
+
+ if self._connected and not self._closing:
+ for ssn in self.connection.sessions.values():
+ self.attach(ssn)
+ self.process(ssn)
+ except:
+ msg = compat.format_exc()
+ self.connection.error = (msg,)
+
+ def connect(self):
+ try:
+ # XXX: should make this non-blocking
+ self._socket = connect(self.connection.host, self.connection.port)
+ self._timeout = None
+ except socket.error, e:
+ if self.connection.reconnect:
+ self._error(e, True)
+ return
+ else:
+ raise e
+ self._buf += struct.pack(HEADER, "AMQP", 1, 1, 0, 10)
+ self._opening = True
+
+ def disconnect(self):
+ self.write_op(ConnectionClose(close_code.normal))
+ self._closing = True
+
+ def attach(self, ssn):
+ sst = self._attachments.get(ssn)
+ if sst is None and not ssn.closed:
+ for i in xrange(0, self.channel_max):
+ if not self._sessions.has_key(i):
+ ch = i
+ break
+ else:
+ raise RuntimeError("all channels used")
+ sst = SessionState(self, ssn, ssn.name, ch)
+ sst.write_op(SessionAttach(name=ssn.name))
+ sst.write_op(SessionCommandPoint(sst.sent, 0))
+ sst.outgoing_idx = 0
+ sst.acked = []
+ if ssn.transactional:
+ sst.write_cmd(TxSelect())
+ self._attachments[ssn] = sst
+ self._sessions[sst.channel] = sst
+
+ for snd in ssn.senders:
+ self.link_out(snd)
+ for rcv in ssn.receivers:
+ self.link_in(rcv)
+
+ if sst is not None and ssn.closing and not sst.detached:
+ sst.detached = True
+ sst.write_op(SessionDetach(name=ssn.name))
+
+ def get_sst(self, op):
+ return self._sessions[op.channel]
+
+ def do_session_detached(self, dtc):
+ sst = self._sessions.pop(dtc.channel)
+ ssn = sst.session
+ del self._attachments[ssn]
+ ssn.closed = True
+
+ def do_session_detach(self, dtc):
+ sst = self.get_sst(dtc)
+ sst.write_op(SessionDetached(name=dtc.name))
+ self.do_session_detached(dtc)
+
+ def link_out(self, snd):
+ sst = self._attachments.get(snd.session)
+ _snd = self._attachments.get(snd)
+ if _snd is None and not snd.closing and not snd.closed:
+ _snd = Attachment(snd)
+ _snd.closing = False
+
+ if snd.target is None:
+ snd.error = ("target is None",)
+ snd.closed = True
+ return
+
+ try:
+ _snd.name, _snd.subject, _snd.options = address.parse(snd.target)
+ except address.LexError, e:
+ snd.error = (e,)
+ snd.closed = True
+ return
+ except address.ParseError, e:
+ snd.error = (e,)
+ snd.closed = True
+ return
+
+ # XXX: subject
+ if _snd.options is None:
+ _snd.options = {}
+
+ def do_link(type, subtype):
+ if type == "topic":
+ _snd._exchange = _snd.name
+ _snd._routing_key = _snd.subject
+ elif type == "queue":
+ _snd._exchange = ""
+ _snd._routing_key = _snd.name
+
+ snd.linked = True
+
+ self.resolve_declare(sst, _snd, "sender", do_link)
+ self._attachments[snd] = _snd
+
+ if snd.linked and snd.closing and not (snd.closed or _snd.closing):
+ _snd.closing = True
+ def do_unlink():
+ del self._attachments[snd]
+ snd.closed = True
+ if _snd.options.get("delete") in ("always", "sender"):
+ self.delete(sst, _snd.name, do_unlink)
+ else:
+ do_unlink()
+
+ def link_in(self, rcv):
+ sst = self._attachments.get(rcv.session)
+ _rcv = self._attachments.get(rcv)
+ if _rcv is None and not rcv.closing and not rcv.closed:
+ _rcv = Attachment(rcv)
+ _rcv.canceled = False
+ _rcv.draining = False
+
+ if rcv.source is None:
+ rcv.error = ("source is None",)
+ rcv.closed = True
+ return
+
+ try:
+ _rcv.name, _rcv.subject, _rcv.options = address.parse(rcv.source)
+ except address.LexError, e:
+ rcv.error = (e,)
+ rcv.closed = True
+ return
+ except address.ParseError, e:
+ rcv.error = (e,)
+ rcv.closed = True
+ return
+
+ # XXX: subject
+ if _rcv.options is None:
+ _rcv.options = {}
+
+ def do_link(type, subtype):
+ if type == "topic":
+ _rcv._queue = "%s.%s" % (rcv.session.name, rcv.destination)
+ sst.write_cmd(QueueDeclare(queue=_rcv._queue, durable=DURABLE_DEFAULT, exclusive=True, auto_delete=True))
+ filter = _rcv.options.get("filter")
+ if _rcv.subject is None and filter is None:
+ f = FILTER_DEFAULTS[subtype]
+ elif _rcv.subject and filter:
+ # XXX
+ raise Exception("can't supply both subject and filter")
+ elif _rcv.subject:
+ # XXX
+ from messaging import Pattern
+ f = Pattern(_rcv.subject)
+ else:
+ f = filter
+ f._bind(sst, _rcv.name, _rcv._queue)
+ elif type == "queue":
+ _rcv._queue = _rcv.name
+
+ sst.write_cmd(MessageSubscribe(queue=_rcv._queue, destination=rcv.destination))
+ sst.write_cmd(MessageSetFlowMode(rcv.destination, flow_mode.credit))
+ rcv.linked = True
+
+ self.resolve_declare(sst, _rcv, "receiver", do_link)
+ self._attachments[rcv] = _rcv
+
+ if rcv.linked and rcv.closing and not rcv.closed:
+ if not _rcv.canceled:
+ def do_unlink():
+ del self._attachments[rcv]
+ rcv.closed = True
+ if _rcv.options.get("delete") in ("always", "receiver"):
+ sst.write_cmd(MessageCancel(rcv.destination))
+ self.delete(sst, _rcv.name, do_unlink)
+ else:
+ sst.write_cmd(MessageCancel(rcv.destination), do_unlink)
+ _rcv.canceled = True
+
+ def resolve_declare(self, sst, lnk, dir, action):
+ def do_resolved(er, qr):
+ if er.not_found and not qr.queue:
+ if lnk.options.get("create") in ("always", dir):
+ err = self.declare(sst, lnk.name, lnk.options, action)
+ else:
+ err = ("no such queue: %s" % lnk.name,)
+
+ if err:
+ tgt = lnk.target
+ tgt.error = err
+ del self._attachments[tgt]
+ tgt.closed = True
+ return
+ elif qr.queue:
+ action("queue", None)
+ else:
+ action("topic", er.type)
+ self.resolve(sst, lnk.name, do_resolved)
+
+ def resolve(self, sst, name, action):
+ args = []
+ def do_result(r):
+ args.append(r)
+ def do_action(r):
+ do_result(r)
+ action(*args)
+ sst.write_query(ExchangeQuery(name), do_result)
+ sst.write_query(QueueQuery(name), do_action)
+
+ def declare(self, sst, name, options, action):
+ opts = dict(options)
+ props = dict(opts.pop("node-properties", {}))
+ durable = props.pop("durable", DURABLE_DEFAULT)
+ type = props.pop("type", "queue")
+ xprops = dict(props.pop("x-properties", {}))
+
+ if props:
+ return ("unrecognized option(s): %s" % "".join(props.keys()),)
+
+ if type == "topic":
+ cmd = ExchangeDeclare(exchange=name, durable=durable)
+ elif type == "queue":
+ cmd = QueueDeclare(queue=name, durable=durable)
+ bindings = xprops.pop("bindings", [])
+ else:
+ return ("unrecognized type, must be topic or queue: %s" % type,)
+
+ for f in cmd.FIELDS:
+ if f.name != "arguments" and xprops.has_key(f.name):
+ cmd[f.name] = xprops.pop(f.name)
+ if xprops:
+ cmd.arguments = xprops
+
+ if type == "topic":
+ if cmd.type is None:
+ cmd.type = "topic"
+ subtype = cmd.type
+ else:
+ subtype = None
+
+ cmds = [cmd]
+ if type == "queue":
+ for b in bindings:
+ try:
+ n, s, o = address.parse(b)
+ except address.ParseError, e:
+ return (e,)
+ cmds.append(ExchangeBind(name, n, s, o))
+
+ for c in cmds[:-1]:
+ sst.write_cmd(c)
+ def do_action():
+ action(type, subtype)
+ sst.write_cmd(cmds[-1], do_action)
+
+ def delete(self, sst, name, action):
+ def do_delete(er, qr):
+ if not er.not_found:
+ sst.write_cmd(ExchangeDelete(name), action)
+ elif qr.queue:
+ sst.write_cmd(QueueDelete(name), action)
+ else:
+ action()
+ self.resolve(sst, name, do_delete)
+
+ def process(self, ssn):
+ if ssn.closed or ssn.closing: return
+
+ sst = self._attachments[ssn]
+
+ while sst.outgoing_idx < len(ssn.outgoing):
+ msg = ssn.outgoing[sst.outgoing_idx]
+ snd = msg._sender
+ # XXX: should check for sender error here
+ _snd = self._attachments.get(snd)
+ if _snd and snd.linked:
+ self.send(snd, msg)
+ sst.outgoing_idx += 1
+ else:
+ break
+
+ for rcv in ssn.receivers:
+ self.process_receiver(rcv)
+
+ if ssn.acked:
+ messages = [m for m in ssn.acked if m not in sst.acked]
+ if messages:
+ # XXX: we're ignoring acks that get lost when disconnected;
+ # could we deal with this via some message-id based purge?
+ ids = RangedSet(*[m._transfer_id for m in messages if m._transfer_id is not None])
+ for range in ids:
+ sst.executed.add_range(range)
+ sst.write_op(SessionCompleted(sst.executed))
+ def ack_ack():
+ for m in messages:
+ ssn.acked.remove(m)
+ if not ssn.transactional:
+ sst.acked.remove(m)
+ sst.write_cmd(MessageAccept(ids), ack_ack)
+ sst.acked.extend(messages)
+
+ if ssn.committing and not sst.committing:
+ def commit_ok():
+ del sst.acked[:]
+ ssn.committing = False
+ ssn.committed = True
+ ssn.aborting = False
+ ssn.aborted = False
+ sst.write_cmd(TxCommit(), commit_ok)
+ sst.committing = True
+
+ if ssn.aborting and not sst.aborting:
+ sst.aborting = True
+ def do_rb():
+ messages = sst.acked + ssn.unacked + ssn.incoming
+ ids = RangedSet(*[m._transfer_id for m in messages])
+ for range in ids:
+ sst.executed.add_range(range)
+ sst.write_op(SessionCompleted(sst.executed))
+ sst.write_cmd(MessageRelease(ids))
+ sst.write_cmd(TxRollback(), do_rb_ok)
+
+ def do_rb_ok():
+ del ssn.incoming[:]
+ del ssn.unacked[:]
+ del sst.acked[:]
+
+ for rcv in ssn.receivers:
+ rcv.impending = rcv.received
+ rcv.returned = rcv.received
+ # XXX: do we need to update granted here as well?
+
+ for rcv in ssn.receivers:
+ self.process_receiver(rcv)
+
+ ssn.aborting = False
+ ssn.aborted = True
+ ssn.committing = False
+ ssn.committed = False
+ sst.aborting = False
+
+ for rcv in ssn.receivers:
+ sst.write_cmd(MessageStop(rcv.destination))
+ sst.write_cmd(ExecutionSync(), do_rb)
+
+ def grant(self, rcv):
+ sst = self._attachments[rcv.session]
+ _rcv = self._attachments.get(rcv)
+ if _rcv is None or not rcv.linked or _rcv.canceled or _rcv.draining:
+ return
+
+ if rcv.granted is UNLIMITED:
+ if rcv.impending is UNLIMITED:
+ delta = 0
+ else:
+ delta = UNLIMITED
+ elif rcv.impending is UNLIMITED:
+ delta = -1
+ else:
+ delta = max(rcv.granted, rcv.received) - rcv.impending
+
+ if delta is UNLIMITED:
+ sst.write_cmd(MessageFlow(rcv.destination, credit_unit.byte, UNLIMITED.value))
+ sst.write_cmd(MessageFlow(rcv.destination, credit_unit.message, UNLIMITED.value))
+ rcv.impending = UNLIMITED
+ elif delta > 0:
+ sst.write_cmd(MessageFlow(rcv.destination, credit_unit.byte, UNLIMITED.value))
+ sst.write_cmd(MessageFlow(rcv.destination, credit_unit.message, delta))
+ rcv.impending += delta
+ elif delta < 0 and not rcv.draining:
+ _rcv.draining = True
+ def do_stop():
+ rcv.impending = rcv.received
+ _rcv.draining = False
+ self.grant(rcv)
+ sst.write_cmd(MessageStop(rcv.destination), do_stop)
+
+ if rcv.draining:
+ _rcv.draining = True
+ def do_flush():
+ rcv.impending = rcv.received
+ rcv.granted = rcv.impending
+ _rcv.draining = False
+ rcv.draining = False
+ sst.write_cmd(MessageFlush(rcv.destination), do_flush)
+
+
+ def process_receiver(self, rcv):
+ if rcv.closed: return
+ self.grant(rcv)
+
+ def send(self, snd, msg):
+ sst = self._attachments[snd.session]
+ _snd = self._attachments[snd]
+
+ # XXX: what if subject is specified for a normal queue?
+ if _snd._routing_key is None:
+ rk = msg.subject
+ else:
+ rk = _snd._routing_key
+ # XXX: do we need to query to figure out how to create the reply-to interoperably?
+ if msg.reply_to:
+ rt = addr2reply_to(msg.reply_to)
+ else:
+ rt = None
+ dp = DeliveryProperties(routing_key=rk)
+ mp = MessageProperties(message_id=msg.id,
+ user_id=msg.user_id,
+ reply_to=rt,
+ correlation_id=msg.correlation_id,
+ content_type=msg.content_type,
+ application_headers=msg.properties)
+ if msg.subject is not None:
+ if mp.application_headers is None:
+ mp.application_headers = {}
+ mp.application_headers[SUBJECT] = msg.subject
+ if msg.to is not None:
+ if mp.application_headers is None:
+ mp.application_headers = {}
+ mp.application_headers[TO] = msg.to
+ if msg.durable:
+ dp.delivery_mode = delivery_mode.persistent
+ enc, dec = get_codec(msg.content_type)
+ body = enc(msg.content)
+ def msg_acked():
+ # XXX: should we log the ack somehow too?
+ snd.acked += 1
+ m = snd.session.outgoing.pop(0)
+ sst.outgoing_idx -= 1
+ assert msg == m
+ sst.write_cmd(MessageTransfer(destination=_snd._exchange, headers=(dp, mp),
+ payload=body), msg_acked)
+
+ def do_message_transfer(self, xfr):
+ sst = self.get_sst(xfr)
+ ssn = sst.session
+
+ msg = self._decode(xfr)
+ rcv = ssn.receivers[int(xfr.destination)]
+ msg._receiver = rcv
+ if rcv.impending is not UNLIMITED:
+ assert rcv.received < rcv.impending, "%s, %s" % (rcv.received, rcv.impending)
+ rcv.received += 1
+ log.debug("RECV [%s] %s", ssn, msg)
+ ssn.incoming.append(msg)
+ self.connection._waiter.notifyAll()
+
+ def _decode(self, xfr):
+ dp = EMPTY_DP
+ mp = EMPTY_MP
+
+ for h in xfr.headers:
+ if isinstance(h, DeliveryProperties):
+ dp = h
+ elif isinstance(h, MessageProperties):
+ mp = h
+
+ ap = mp.application_headers
+ enc, dec = get_codec(mp.content_type)
+ content = dec(xfr.payload)
+ msg = Message(content)
+ msg.id = mp.message_id
+ if ap is not None:
+ msg.to = ap.get(TO)
+ msg.subject = ap.get(SUBJECT)
+ msg.user_id = mp.user_id
+ if mp.reply_to is not None:
+ msg.reply_to = reply_to2addr(mp.reply_to)
+ msg.correlation_id = mp.correlation_id
+ msg.durable = dp.delivery_mode == delivery_mode.persistent
+ msg.redelivered = dp.redelivered
+ msg.properties = mp.application_headers
+ msg.content_type = mp.content_type
+ msg._transfer_id = xfr.id
+ return msg
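[Editor's note, not part of the patch] SessionState.write_cmd and Driver.do_session_completed above implement a small command-completion protocol: each outgoing command gets a sequential id, an optional callback is parked in sst.actions, and the callback fires once the broker reports that id as completed. A minimal sketch of that bookkeeping, stripped of the AMQP types (the class and names below are illustrative assumptions):

results = []

class CompletionTracker:
  def __init__(self):
    self.sent = 0            # id assigned to the next outgoing command
    self.min_completion = 0  # lowest id not yet confirmed by the peer
    self.actions = {}        # id -> callback to run once confirmed

  def write_cmd(self, action=None):
    id = self.sent
    self.sent += 1
    if action is not None:
      self.actions[id] = action
    return id

  def completed(self, upto):
    # peer confirmed every command with id < upto (contiguous case)
    while self.min_completion < upto:
      cb = self.actions.pop(self.min_completion, None)
      if cb is not None:
        cb()
      self.min_completion += 1

tracker = CompletionTracker()
tracker.write_cmd(lambda: results.append("queue declared"))
tracker.write_cmd()
tracker.completed(2)
print results   # ['queue declared']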
diff --git a/python/qpid/exceptions.py b/python/qpid/exceptions.py
index 7eaaf81ed4..2bd80b7ffe 100644
--- a/python/qpid/exceptions.py
+++ b/python/qpid/exceptions.py
@@ -19,3 +19,4 @@
class Closed(Exception): pass
class Timeout(Exception): pass
+class VersionError(Exception): pass
diff --git a/python/qpid/framer.py b/python/qpid/framer.py
index f6363b2291..47f57cf649 100644
--- a/python/qpid/framer.py
+++ b/python/qpid/framer.py
@@ -26,47 +26,6 @@ from logging import getLogger
raw = getLogger("qpid.io.raw")
frm = getLogger("qpid.io.frm")
-FIRST_SEG = 0x08
-LAST_SEG = 0x04
-FIRST_FRM = 0x02
-LAST_FRM = 0x01
-
-class Frame:
-
- HEADER = "!2BHxBH4x"
- MAX_PAYLOAD = 65535 - struct.calcsize(HEADER)
-
- def __init__(self, flags, type, track, channel, payload):
- if len(payload) > Frame.MAX_PAYLOAD:
- raise ValueError("max payload size exceeded: %s" % len(payload))
- self.flags = flags
- self.type = type
- self.track = track
- self.channel = channel
- self.payload = payload
-
- def isFirstSegment(self):
- return bool(FIRST_SEG & self.flags)
-
- def isLastSegment(self):
- return bool(LAST_SEG & self.flags)
-
- def isFirstFrame(self):
- return bool(FIRST_FRM & self.flags)
-
- def isLastFrame(self):
- return bool(LAST_FRM & self.flags)
-
- def __str__(self):
- return "%s%s%s%s %s %s %s %r" % (int(self.isFirstSegment()),
- int(self.isLastSegment()),
- int(self.isFirstFrame()),
- int(self.isLastFrame()),
- self.type,
- self.track,
- self.channel,
- self.payload)
-
class FramingError(Exception): pass
class Framer(Packer):
@@ -76,19 +35,29 @@ class Framer(Packer):
def __init__(self, sock):
self.sock = sock
self.sock_lock = RLock()
- self._buf = ""
+ self.tx_buf = ""
+ self.rx_buf = ""
+ self.security_layer_tx = None
+ self.security_layer_rx = None
+ self.maxbufsize = 65535
def aborted(self):
return False
def write(self, buf):
- self._buf += buf
+ self.tx_buf += buf
def flush(self):
self.sock_lock.acquire()
try:
- self._write(self._buf)
- self._buf = ""
+ if self.security_layer_tx:
+ status, cipher_buf = self.security_layer_tx.encode(self.tx_buf)
+ if status == False:
+ raise Closed(self.security_layer_tx.getError())
+ self._write(cipher_buf)
+ else:
+ self._write(self.tx_buf)
+ self.tx_buf = ""
frm.debug("FLUSHED")
finally:
self.sock_lock.release()
@@ -105,25 +74,42 @@ class Framer(Packer):
raw.debug("SENT %r", buf[:n])
buf = buf[n:]
+ ##
+ ## Implementation Note:
+ ##
+ ## This function was modified to use the SASL security layer for content
+ ## decryption. As such, the socket read should read in "self.maxbufsize"
+ ## instead of "n" (the requested number of octets). However, since this
+ ## is one of two places in the code where the socket is read, the read
+ ## size had to be left at "n". This is because this function is
+ ## apparently only used to read the first 8 octets from a TCP socket. If
+ ## we read beyond "n" octets, the remaining octets won't be processed and
+ ## the connection handshake will fail.
+ ##
def read(self, n):
- data = ""
- while len(data) < n:
+ while len(self.rx_buf) < n:
try:
- s = self.sock.recv(n - len(data))
+ s = self.sock.recv(n) # NOTE: instead of "n", arg should be "self.maxbufsize"
+ if self.security_layer_rx:
+ status, s = self.security_layer_rx.decode(s)
+ if status == False:
+ raise Closed(self.security_layer_rx.getError())
except socket.timeout:
if self.aborted():
raise Closed()
else:
continue
except socket.error, e:
- if data != "":
+ if self.rx_buf != "":
raise e
else:
raise Closed()
if len(s) == 0:
raise Closed()
- data += s
+ self.rx_buf += s
raw.debug("RECV %r", s)
+ data = self.rx_buf[0:n]
+ self.rx_buf = self.rx_buf[n:]
return data
def read_header(self):
@@ -136,24 +122,3 @@ class Framer(Packer):
self.flush()
finally:
self.sock_lock.release()
-
- def write_frame(self, frame):
- self.sock_lock.acquire()
- try:
- size = len(frame.payload) + struct.calcsize(Frame.HEADER)
- track = frame.track & 0x0F
- self.pack(Frame.HEADER, frame.flags, frame.type, size, track, frame.channel)
- self.write(frame.payload)
- if frame.isLastSegment() and frame.isLastFrame():
- self.flush()
- frm.debug("SENT %s", frame)
- finally:
- self.sock_lock.release()
-
- def read_frame(self):
- flags, type, size, track, channel = self.unpack(Frame.HEADER)
- if flags & 0xF0: raise FramingError()
- payload = self.read(size - struct.calcsize(Frame.HEADER))
- frame = Frame(flags, type, track, channel, payload)
- frm.debug("RECV %s", frame)
- return frame
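[Editor's note, not part of the patch] The Implementation Note above describes how flush() and read() now route bytes through an optional SASL security layer. A hypothetical no-op layer, shown only to illustrate the (status, bytes) contract the Framer expects (encode() on transmit, decode() on receive, getError() when the status is False):

class NullSecurityLayer:
  """Stand-in security layer; a real SASL layer would cipher/decipher."""
  def encode(self, clear_bytes):
    return True, clear_bytes     # (ok, bytes to put on the wire)
  def decode(self, cipher_bytes):
    return True, cipher_bytes    # (ok, bytes to hand to the protocol)
  def getError(self):
    return "no error"

# framer.security_layer_tx = framer.security_layer_rx = NullSecurityLayer()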
diff --git a/python/qpid/framing.py b/python/qpid/framing.py
new file mode 100644
index 0000000000..0a8f26272c
--- /dev/null
+++ b/python/qpid/framing.py
@@ -0,0 +1,310 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import struct
+
+FIRST_SEG = 0x08
+LAST_SEG = 0x04
+FIRST_FRM = 0x02
+LAST_FRM = 0x01
+
+class Frame:
+
+ HEADER = "!2BHxBH4x"
+ HEADER_SIZE = struct.calcsize(HEADER)
+ MAX_PAYLOAD = 65535 - struct.calcsize(HEADER)
+
+ def __init__(self, flags, type, track, channel, payload):
+ if len(payload) > Frame.MAX_PAYLOAD:
+ raise ValueError("max payload size exceeded: %s" % len(payload))
+ self.flags = flags
+ self.type = type
+ self.track = track
+ self.channel = channel
+ self.payload = payload
+
+ def isFirstSegment(self):
+ return bool(FIRST_SEG & self.flags)
+
+ def isLastSegment(self):
+ return bool(LAST_SEG & self.flags)
+
+ def isFirstFrame(self):
+ return bool(FIRST_FRM & self.flags)
+
+ def isLastFrame(self):
+ return bool(LAST_FRM & self.flags)
+
+ def __repr__(self):
+ return "%s%s%s%s %s %s %s %r" % (int(self.isFirstSegment()),
+ int(self.isLastSegment()),
+ int(self.isFirstFrame()),
+ int(self.isLastFrame()),
+ self.type,
+ self.track,
+ self.channel,
+ self.payload)
+
+class Segment:
+
+ def __init__(self, first, last, type, track, channel, payload):
+ self.id = None
+ self.offset = None
+ self.first = first
+ self.last = last
+ self.type = type
+ self.track = track
+ self.channel = channel
+ self.payload = payload
+
+ def __repr__(self):
+ return "%s%s %s %s %s %r" % (int(self.first), int(self.last), self.type,
+ self.track, self.channel, self.payload)
+
+class FrameDecoder:
+
+ def __init__(self):
+ self.input = ""
+ self.output = []
+ self.parse = self.__frame_header
+
+ def write(self, bytes):
+ self.input += bytes
+ while True:
+ next = self.parse()
+ if next is None:
+ break
+ else:
+ self.parse = next
+
+ def __consume(self, n):
+ result = self.input[:n]
+ self.input = self.input[n:]
+ return result
+
+ def __frame_header(self):
+ if len(self.input) >= Frame.HEADER_SIZE:
+ st = self.__consume(Frame.HEADER_SIZE)
+ self.flags, self.type, self.size, self.track, self.channel = \
+ struct.unpack(Frame.HEADER, st)
+ return self.__frame_body
+
+ def __frame_body(self):
+ size = self.size - Frame.HEADER_SIZE
+ if len(self.input) >= size:
+ payload = self.__consume(size)
+ frame = Frame(self.flags, self.type, self.track, self.channel, payload)
+ self.output.append(frame)
+ return self.__frame_header
+
+ def read(self):
+ result = self.output
+ self.output = []
+ return result
+
+class FrameEncoder:
+
+ def __init__(self):
+ self.output = ""
+
+ def write(self, *frames):
+ for frame in frames:
+ size = len(frame.payload) + Frame.HEADER_SIZE
+ track = frame.track & 0x0F
+ self.output += struct.pack(Frame.HEADER, frame.flags, frame.type, size,
+ track, frame.channel)
+ self.output += frame.payload
+
+ def read(self):
+ result = self.output
+ self.output = ""
+ return result
+
+class SegmentDecoder:
+
+ def __init__(self):
+ self.fragments = {}
+ self.segments = []
+
+ def write(self, *frames):
+ for frm in frames:
+ key = (frm.channel, frm.track)
+ seg = self.fragments.get(key)
+
+ if seg == None:
+ seg = Segment(frm.isFirstSegment(), frm.isLastSegment(),
+ frm.type, frm.track, frm.channel, "")
+ self.fragments[key] = seg
+
+ seg.payload += frm.payload
+
+ if frm.isLastFrame():
+ self.fragments.pop(key)
+ self.segments.append(seg)
+
+ def read(self):
+ result = self.segments
+ self.segments = []
+ return result
+
+class SegmentEncoder:
+
+ def __init__(self, max_payload=Frame.MAX_PAYLOAD):
+ self.max_payload = max_payload
+ self.frames = []
+
+ def write(self, *segments):
+ for seg in segments:
+ remaining = seg.payload
+
+ first = True
+ while first or remaining:
+ payload = remaining[:self.max_payload]
+ remaining = remaining[self.max_payload:]
+
+ flags = 0
+ if first:
+ flags |= FIRST_FRM
+ first = False
+ if not remaining:
+ flags |= LAST_FRM
+ if seg.first:
+ flags |= FIRST_SEG
+ if seg.last:
+ flags |= LAST_SEG
+
+ frm = Frame(flags, seg.type, seg.track, seg.channel, payload)
+ self.frames.append(frm)
+
+ def read(self):
+ result = self.frames
+ self.frames = []
+ return result
+
+from ops import COMMANDS, CONTROLS, COMPOUND, Header, segment_type, track
+from spec import SPEC
+
+from codec010 import StringCodec
+
+class OpEncoder:
+
+ def __init__(self):
+ self.segments = []
+
+ def write(self, *ops):
+ for op in ops:
+ if COMMANDS.has_key(op.NAME):
+ seg_type = segment_type.command
+ seg_track = track.command
+ enc = self.encode_command(op)
+ elif CONTROLS.has_key(op.NAME):
+ seg_type = segment_type.control
+ seg_track = track.control
+ enc = self.encode_compound(op)
+ else:
+ raise ValueError(op)
+ seg = Segment(True, False, seg_type, seg_track, op.channel, enc)
+ self.segments.append(seg)
+ if hasattr(op, "headers") and op.headers is not None:
+ hdrs = ""
+ for h in op.headers:
+ hdrs += self.encode_compound(h)
+ seg = Segment(False, False, segment_type.header, seg_track, op.channel,
+ hdrs)
+ self.segments.append(seg)
+ if hasattr(op, "payload") and op.payload is not None:
+ self.segments.append(Segment(False, False, segment_type.body, seg_track,
+ op.channel, op.payload))
+ self.segments[-1].last = True
+
+ def encode_command(self, cmd):
+ sc = StringCodec()
+ sc.write_uint16(cmd.CODE)
+ sc.write_compound(Header(sync=cmd.sync))
+ sc.write_fields(cmd)
+ return sc.encoded
+
+ def encode_compound(self, op):
+ sc = StringCodec()
+ sc.write_compound(op)
+ return sc.encoded
+
+ def read(self):
+ result = self.segments
+ self.segments = []
+ return result
+
+class OpDecoder:
+
+ def __init__(self):
+ self.op = None
+ self.ops = []
+
+ def write(self, *segments):
+ for seg in segments:
+ if seg.first:
+ if seg.type == segment_type.command:
+ self.op = self.decode_command(seg.payload)
+ elif seg.type == segment_type.control:
+ self.op = self.decode_control(seg.payload)
+ else:
+ raise ValueError(seg)
+ self.op.channel = seg.channel
+ elif seg.type == segment_type.header:
+ if self.op.headers is None:
+ self.op.headers = []
+ self.op.headers.extend(self.decode_headers(seg.payload))
+ elif seg.type == segment_type.body:
+ if self.op.payload is None:
+ self.op.payload = seg.payload
+ else:
+ self.op.payload += seg.payload
+ if seg.last:
+ self.ops.append(self.op)
+ self.op = None
+
+ def decode_command(self, encoded):
+ sc = StringCodec(encoded)
+ code = sc.read_uint16()
+ cls = COMMANDS[code]
+ hdr = sc.read_compound(Header)
+ cmd = cls()
+ sc.read_fields(cmd)
+ cmd.sync = hdr.sync
+ return cmd
+
+ def decode_control(self, encoded):
+ sc = StringCodec(encoded)
+ code = sc.read_uint16()
+ cls = CONTROLS[code]
+ ctl = cls()
+ sc.read_fields(ctl)
+ return ctl
+
+ def decode_headers(self, encoded):
+ sc = StringCodec(encoded)
+ result = []
+ while sc.encoded:
+ result.append(sc.read_struct32())
+ return result
+
+ def read(self):
+ result = self.ops
+ self.ops = []
+ return result
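[Editor's note, not part of the patch] Driver.write_op and Driver.readable earlier in this patch chain these codecs: ops are encoded to segments, segments to frames, and the reverse on input. A sketch of a round trip through just the frame and segment layers (assumes this tree's qpid package is importable; the payload, channel, and type codes are arbitrary example values):

from qpid.framing import Segment, SegmentEncoder, SegmentDecoder, \
    FrameEncoder, FrameDecoder
from qpid.ops import segment_type, track

# a single command segment larger than one frame, so it must be split
seg = Segment(True, True, segment_type.command, track.command, 1, "x" * 100000)

seg_enc = SegmentEncoder()
frame_enc = FrameEncoder()
seg_enc.write(seg)                 # split into <= Frame.MAX_PAYLOAD frames
frame_enc.write(*seg_enc.read())
wire = frame_enc.read()            # what Driver.write_op appends to _buf

frame_dec = FrameDecoder()
seg_dec = SegmentDecoder()
frame_dec.write(wire)              # what Driver.readable feeds in
seg_dec.write(*frame_dec.read())
(echo,) = seg_dec.read()           # frames reassembled into one segment
assert echo.payload == seg.payload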
diff --git a/python/qpid/generator.py b/python/qpid/generator.py
new file mode 100644
index 0000000000..02d11e5005
--- /dev/null
+++ b/python/qpid/generator.py
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys
+
+from ops import *
+
+def METHOD(module, op):
+ method = lambda self, *args, **kwargs: self.invoke(op, args, kwargs)
+ if sys.version_info[:2] > (2, 3):
+ method.__name__ = op.__name__
+ method.__doc__ = op.__doc__
+ method.__module__ = module
+ return method
+
+def generate(module, operations):
+ dict = {}
+
+ for name, enum in ENUMS.items():
+ if isinstance(name, basestring):
+ dict[name] = enum
+
+ for name, op in COMPOUND.items():
+ if isinstance(name, basestring):
+ dict[name] = METHOD(module, op)
+
+ for name, op in operations.items():
+ if isinstance(name, basestring):
+ dict[name] = METHOD(module, op)
+
+ return dict
+
+def invoker(name, operations):
+ return type(name, (), generate(invoker.__module__, operations))
+
+def command_invoker():
+ return invoker("CommandInvoker", COMMANDS)
+
+def control_invoker():
+ return invoker("ControlInvoker", CONTROLS)
diff --git a/python/qpid/harness.py b/python/qpid/harness.py
new file mode 100644
index 0000000000..ce48481612
--- /dev/null
+++ b/python/qpid/harness.py
@@ -0,0 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+class Skipped(Exception): pass
diff --git a/python/qpid/invoker.py b/python/qpid/invoker.py
deleted file mode 100644
index 635f3ee769..0000000000
--- a/python/qpid/invoker.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-
-# TODO: need a better naming for this class now that it does the value
-# stuff
-class Invoker:
-
- def METHOD(self, name, resolved):
- method = lambda *args, **kwargs: self.invoke(resolved, args, kwargs)
- if sys.version_info[:2] > (2, 3):
- method.__name__ = resolved.pyname
- method.__doc__ = resolved.pydoc
- method.__module__ = self.__class__.__module__
- self.__dict__[name] = method
- return method
-
- def VALUE(self, name, resolved):
- self.__dict__[name] = resolved
- return resolved
-
- def ERROR(self, name, resolved):
- raise AttributeError("%s instance has no attribute '%s'" %
- (self.__class__.__name__, name))
-
- def resolve_method(self, name):
- return ERROR, None
-
- def __getattr__(self, name):
- disp, resolved = self.resolve_method(name)
- return disp(name, resolved)
diff --git a/python/qpid/lexer.py b/python/qpid/lexer.py
new file mode 100644
index 0000000000..87845560eb
--- /dev/null
+++ b/python/qpid/lexer.py
@@ -0,0 +1,112 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import re
+
+class Type:
+
+ def __init__(self, name, pattern=None):
+ self.name = name
+ self.pattern = pattern
+
+ def __repr__(self):
+ return self.name
+
+class Lexicon:
+
+ def __init__(self):
+ self.types = []
+ self._eof = None
+
+ def define(self, name, pattern):
+ t = Type(name, pattern)
+ self.types.append(t)
+ return t
+
+ def eof(self, name):
+ t = Type(name)
+ self._eof = t
+ return t
+
+ def compile(self):
+ types = self.types[:]
+ joined = "|".join(["(%s)" % t.pattern for t in types])
+ rexp = re.compile(joined)
+ return Lexer(types, self._eof, rexp)
+
+class Token:
+
+ def __init__(self, type, value, input, position):
+ self.type = type
+ self.value = value
+ self.input = input
+ self.position = position
+
+ def line_info(self):
+ return line_info(self.input, self.position)
+
+ def __repr__(self):
+ if self.value is None:
+ return repr(self.type)
+ else:
+ return "%s(%r)" % (self.type, self.value)
+
+
+class LexError(Exception):
+ pass
+
+def line_info(st, pos):
+ idx = 0
+ lineno = 1
+ column = 0
+ line_pos = 0
+ while idx < pos:
+ if st[idx] == "\n":
+ lineno += 1
+ column = 0
+ line_pos = idx
+ column += 1
+ idx += 1
+
+ end = st.find("\n", line_pos)
+ if end < 0:
+ end = len(st)
+ line = st[line_pos:end]
+
+ return line, lineno, column
+
+class Lexer:
+
+ def __init__(self, types, eof, rexp):
+ self.types = types
+ self.eof = eof
+ self.rexp = rexp
+
+ def lex(self, st):
+ pos = 0
+ while pos < len(st):
+ m = self.rexp.match(st, pos)
+ if m is None:
+ line, ln, col = line_info(st, pos)
+ raise LexError("unrecognized characters line:%s,%s: %s" % (ln, col, line))
+ else:
+ idx = m.lastindex
+ t = Token(self.types[idx - 1], m.group(idx), st, pos)
+ yield t
+ pos = m.end()
+ yield Token(self.eof, None, st, pos)
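[Editor's note, not part of the patch] Lexicon and Lexer above form a small table-driven tokenizer: define() registers token types in priority order, compile() joins their patterns into one alternation, and lex() yields Tokens until the EOF type. A throwaway example; the token names and patterns are made up for illustration:

from qpid.lexer import Lexicon

l = Lexicon()
NUMBER = l.define("number", r"[0-9]+")
NAME   = l.define("name", r"[a-zA-Z_]+")
WS     = l.define("ws", r"[ \t]+")
EOF    = l.eof("eof")

lexer = l.compile()
for tok in lexer.lex("route 66"):
  print tok        # name('route'), ws(' '), number('66'), eof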
diff --git a/python/qpid/management.py b/python/qpid/management.py
index 83c29a78a5..325ab4903d 100644
--- a/python/qpid/management.py
+++ b/python/qpid/management.py
@@ -17,6 +17,10 @@
# under the License.
#
+###############################################################################
+## This file is being obsoleted by qmf/console.py
+###############################################################################
+
"""
Management API for Qpid
"""
@@ -69,6 +73,57 @@ class mgmtObject (object):
for cell in row:
setattr (self, cell[0], cell[1])
+class objectId(object):
+ """ Object that represents QMF object identifiers """
+
+ def __init__(self, codec, first=0, second=0):
+ if codec:
+ self.first = codec.read_uint64()
+ self.second = codec.read_uint64()
+ else:
+ self.first = first
+ self.second = second
+
+ def __cmp__(self, other):
+ if other == None:
+ return 1
+ if self.first < other.first:
+ return -1
+ if self.first > other.first:
+ return 1
+ if self.second < other.second:
+ return -1
+ if self.second > other.second:
+ return 1
+ return 0
+
+
+ def index(self):
+ return (self.first, self.second)
+
+ def getFlags(self):
+ return (self.first & 0xF000000000000000) >> 60
+
+ def getSequence(self):
+ return (self.first & 0x0FFF000000000000) >> 48
+
+ def getBroker(self):
+ return (self.first & 0x0000FFFFF0000000) >> 28
+
+ def getBank(self):
+ return self.first & 0x000000000FFFFFFF
+
+ def getObject(self):
+ return self.second
+
+ def isDurable(self):
+ return self.getSequence() == 0
+
+ def encode(self, codec):
+ codec.write_uint64(self.first)
+ codec.write_uint64(self.second)
+
+
class methodResult:
""" Object that contains the result of a method call """
@@ -111,19 +166,23 @@ class managementChannel:
ssn.exchange_bind (exchange="amq.direct",
queue=self.replyName, binding_key=self.replyName)
- ssn.message_subscribe (queue=self.topicName, destination="tdest")
- ssn.message_subscribe (queue=self.replyName, destination="rdest")
+ ssn.message_subscribe (queue=self.topicName, destination="tdest",
+ accept_mode=ssn.accept_mode.none,
+ acquire_mode=ssn.acquire_mode.pre_acquired)
+ ssn.message_subscribe (queue=self.replyName, destination="rdest",
+ accept_mode=ssn.accept_mode.none,
+ acquire_mode=ssn.acquire_mode.pre_acquired)
ssn.incoming ("tdest").listen (self.topicCb, self.exceptionCb)
ssn.incoming ("rdest").listen (self.replyCb)
ssn.message_set_flow_mode (destination="tdest", flow_mode=1)
- ssn.message_flow (destination="tdest", unit=0, value=0xFFFFFFFF)
- ssn.message_flow (destination="tdest", unit=1, value=0xFFFFFFFF)
+ ssn.message_flow (destination="tdest", unit=0, value=0xFFFFFFFFL)
+ ssn.message_flow (destination="tdest", unit=1, value=0xFFFFFFFFL)
ssn.message_set_flow_mode (destination="rdest", flow_mode=1)
- ssn.message_flow (destination="rdest", unit=0, value=0xFFFFFFFF)
- ssn.message_flow (destination="rdest", unit=1, value=0xFFFFFFFF)
+ ssn.message_flow (destination="rdest", unit=0, value=0xFFFFFFFFL)
+ ssn.message_flow (destination="rdest", unit=1, value=0xFFFFFFFFL)
def setBrokerInfo (self, data):
self.brokerInfo = data
@@ -151,9 +210,6 @@ class managementChannel:
if self.enabled:
self.qpidChannel.message_transfer (destination=exchange, message=msg)
- def accept (self, msg):
- self.qpidChannel.message_accept(RangedSet(msg.id))
-
def message (self, body, routing_key="broker"):
dp = self.qpidChannel.delivery_properties()
dp.routing_key = routing_key
@@ -178,8 +234,7 @@ class managementClient:
#========================================================
# User API - interacts with the class's user
#========================================================
- def __init__ (self, amqpSpec, ctrlCb=None, configCb=None, instCb=None, methodCb=None, closeCb=None):
- self.spec = amqpSpec
+ def __init__ (self, unused=None, ctrlCb=None, configCb=None, instCb=None, methodCb=None, closeCb=None):
self.ctrlCb = ctrlCb
self.configCb = configCb
self.instCb = instCb
@@ -212,7 +267,7 @@ class managementClient:
self.channels.append (mch)
self.incOutstanding (mch)
- codec = Codec (self.spec)
+ codec = Codec ()
self.setHeader (codec, ord ('B'))
msg = mch.message(codec.encoded)
mch.send ("qpid.management", msg)
@@ -229,12 +284,12 @@ class managementClient:
def getObjects (self, channel, userSequence, className, bank=0):
""" Request immediate content from broker """
- codec = Codec (self.spec)
+ codec = Codec ()
self.setHeader (codec, ord ('G'), userSequence)
ft = {}
ft["_class"] = className
codec.write_map (ft)
- msg = channel.message(codec.encoded, routing_key="agent.%d" % bank)
+ msg = channel.message(codec.encoded, routing_key="agent.1.%d" % bank)
channel.send ("qpid.management", msg)
def syncWaitForStable (self, channel):
@@ -297,27 +352,28 @@ class managementClient:
#========================================================
def topicCb (self, ch, msg):
""" Receive messages via the topic queue of a particular channel. """
- codec = Codec (self.spec, msg.body)
- hdr = self.checkHeader (codec)
- if hdr == None:
- raise ValueError ("outer header invalid");
+ codec = Codec (msg.body)
+ while True:
+ hdr = self.checkHeader (codec)
+ if hdr == None:
+ return
- if hdr[0] == 'p':
- self.handlePackageInd (ch, codec)
- elif hdr[0] == 'q':
- self.handleClassInd (ch, codec)
- elif hdr[0] == 'h':
- self.handleHeartbeat (ch, codec)
- else:
- self.parse (ch, codec, hdr[0], hdr[1])
- ch.accept(msg)
+ if hdr[0] == 'p':
+ self.handlePackageInd (ch, codec)
+ elif hdr[0] == 'q':
+ self.handleClassInd (ch, codec)
+ elif hdr[0] == 'h':
+ self.handleHeartbeat (ch, codec)
+ elif hdr[0] == 'e':
+ self.handleEvent (ch, codec)
+ else:
+ self.parse (ch, codec, hdr[0], hdr[1])
def replyCb (self, ch, msg):
""" Receive messages via the reply queue of a particular channel. """
- codec = Codec (self.spec, msg.body)
+ codec = Codec (msg.body)
hdr = self.checkHeader (codec)
if hdr == None:
- ch.accept(msg)
return
if hdr[0] == 'm':
@@ -332,7 +388,6 @@ class managementClient:
self.handleClassInd (ch, codec)
else:
self.parse (ch, codec, hdr[0], hdr[1])
- ch.accept(msg)
def exceptCb (self, ch, data):
if self.closeCb != None:
@@ -345,25 +400,27 @@ class managementClient:
""" Compose the header of a management message. """
codec.write_uint8 (ord ('A'))
codec.write_uint8 (ord ('M'))
- codec.write_uint8 (ord ('1'))
+ codec.write_uint8 (ord ('2'))
codec.write_uint8 (opcode)
codec.write_uint32 (seq)
def checkHeader (self, codec):
- """ Check the header of a management message and extract the opcode and
- class. """
- octet = chr (codec.read_uint8 ())
- if octet != 'A':
- return None
- octet = chr (codec.read_uint8 ())
- if octet != 'M':
+ """ Check the header of a management message and extract the opcode and class. """
+ try:
+ octet = chr (codec.read_uint8 ())
+ if octet != 'A':
+ return None
+ octet = chr (codec.read_uint8 ())
+ if octet != 'M':
+ return None
+ octet = chr (codec.read_uint8 ())
+ if octet != '2':
+ return None
+ opcode = chr (codec.read_uint8 ())
+ seq = codec.read_uint32 ()
+ return (opcode, seq)
+ except:
return None
- octet = chr (codec.read_uint8 ())
- if octet != '1':
- return None
- opcode = chr (codec.read_uint8 ())
- seq = codec.read_uint32 ()
- return (opcode, seq)
def encodeValue (self, codec, value, typecode):
""" Encode, into the codec, a value based on its typecode. """
@@ -380,19 +437,19 @@ class managementClient:
elif typecode == 6:
codec.write_str8 (value)
elif typecode == 7:
- codec.write_vbin32 (value)
+ codec.write_str16 (value)
elif typecode == 8: # ABSTIME
codec.write_uint64 (long (value))
elif typecode == 9: # DELTATIME
codec.write_uint64 (long (value))
elif typecode == 10: # REF
- codec.write_uint64 (long (value))
+ value.encode(codec)
elif typecode == 11: # BOOL
codec.write_uint8 (int (value))
elif typecode == 12: # FLOAT
codec.write_float (float (value))
elif typecode == 13: # DOUBLE
- codec.write_double (double (value))
+ codec.write_double (float (value))
elif typecode == 14: # UUID
codec.write_uuid (value)
elif typecode == 15: # FTABLE
@@ -421,15 +478,15 @@ class managementClient:
elif typecode == 5:
data = codec.read_uint8 ()
elif typecode == 6:
- data = str (codec.read_str8 ())
+ data = codec.read_str8 ()
elif typecode == 7:
- data = codec.read_vbin32 ()
+ data = codec.read_str16 ()
elif typecode == 8: # ABSTIME
data = codec.read_uint64 ()
elif typecode == 9: # DELTATIME
data = codec.read_uint64 ()
elif typecode == 10: # REF
- data = codec.read_uint64 ()
+ data = objectId(codec)
elif typecode == 11: # BOOL
data = codec.read_uint8 ()
elif typecode == 12: # FLOAT
@@ -469,12 +526,14 @@ class managementClient:
if self.ctrlCb != None:
self.ctrlCb (ch.context, self.CTRL_SCHEMA_LOADED, None)
ch.ssn.exchange_bind (exchange="qpid.management",
- queue=ch.topicName, binding_key="mgmt.#")
+ queue=ch.topicName, binding_key="console.#")
+ ch.ssn.exchange_bind (exchange="qpid.management",
+ queue=ch.topicName, binding_key="schema.#")
def handleMethodReply (self, ch, codec, sequence):
status = codec.read_uint32 ()
- sText = str (codec.read_str8 ())
+ sText = codec.read_str16 ()
data = self.seqMgr.release (sequence)
if data == None:
@@ -510,7 +569,7 @@ class managementClient:
def handleCommandComplete (self, ch, codec, seq):
code = codec.read_uint32 ()
- text = str (codec.read_str8 ())
+ text = codec.read_str8 ()
data = (seq, code, text)
context = self.seqMgr.release (seq)
if context == "outstanding":
@@ -530,19 +589,19 @@ class managementClient:
self.ctrlCb (ch.context, self.CTRL_BROKER_INFO, ch.brokerInfo)
# Send a package request
- sendCodec = Codec (self.spec)
+ sendCodec = Codec ()
seq = self.seqMgr.reserve ("outstanding")
self.setHeader (sendCodec, ord ('P'), seq)
smsg = ch.message(sendCodec.encoded)
ch.send ("qpid.management", smsg)
def handlePackageInd (self, ch, codec):
- pname = str (codec.read_str8 ())
+ pname = codec.read_str8 ()
if pname not in self.packages:
self.packages[pname] = {}
# Send a class request
- sendCodec = Codec (self.spec)
+ sendCodec = Codec ()
seq = self.seqMgr.reserve ("outstanding")
self.setHeader (sendCodec, ord ('Q'), seq)
self.incOutstanding (ch)
@@ -551,15 +610,18 @@ class managementClient:
ch.send ("qpid.management", smsg)
def handleClassInd (self, ch, codec):
- pname = str (codec.read_str8 ())
- cname = str (codec.read_str8 ())
- hash = codec.read_bin128 ()
+ kind = codec.read_uint8()
+ if kind != 1: # This API doesn't handle new-style events
+ return
+ pname = codec.read_str8()
+ cname = codec.read_str8()
+ hash = codec.read_bin128()
if pname not in self.packages:
return
if (cname, hash) not in self.packages[pname]:
# Send a schema request
- sendCodec = Codec (self.spec)
+ sendCodec = Codec ()
seq = self.seqMgr.reserve ("outstanding")
self.setHeader (sendCodec, ord ('S'), seq)
self.incOutstanding (ch)
@@ -574,16 +636,49 @@ class managementClient:
if self.ctrlCb != None:
self.ctrlCb (ch.context, self.CTRL_HEARTBEAT, timestamp)
+ def handleEvent (self, ch, codec):
+ if self.eventCb == None:
+ return
+ timestamp = codec.read_uint64()
+ objId = objectId(codec)
+ packageName = codec.read_str8()
+ className = codec.read_str8()
+ hash = codec.read_bin128()
+ name = codec.read_str8()
+ classKey = (packageName, className, hash)
+ if classKey not in self.schema:
+ return
+ schemaClass = self.schema[classKey]
+ row = []
+ es = schemaClass['E']
+ arglist = None
+ for ename in es:
+ (edesc, eargs) = es[ename]
+ if ename == name:
+ arglist = eargs
+ if arglist == None:
+ return
+ for arg in arglist:
+ row.append((arg[0], self.decodeValue(codec, arg[1])))
+ self.eventCb(ch.context, classKey, objId, name, row)
+
def parseSchema (self, ch, codec):
""" Parse a received schema-description message. """
self.decOutstanding (ch)
- packageName = str (codec.read_str8 ())
- className = str (codec.read_str8 ())
+ kind = codec.read_uint8()
+ if kind != 1: # This API doesn't handle new-style events
+ return
+ packageName = codec.read_str8 ()
+ className = codec.read_str8 ()
hash = codec.read_bin128 ()
+ hasSupertype = 0 #codec.read_uint8()
configCount = codec.read_uint16 ()
instCount = codec.read_uint16 ()
methodCount = codec.read_uint16 ()
- eventCount = codec.read_uint16 ()
+ if hasSupertype != 0:
+ supertypePackage = codec.read_str8()
+ supertypeClass = codec.read_str8()
+ supertypeHash = codec.read_bin128()
if packageName not in self.packages:
return
@@ -597,22 +692,22 @@ class managementClient:
configs = []
insts = []
methods = {}
- events = []
configs.append (("id", 4, "", "", 1, 1, None, None, None, None, None))
insts.append (("id", 4, None, None))
for idx in range (configCount):
ft = codec.read_map ()
- name = str (ft["name"])
- type = ft["type"]
- access = ft["access"]
- index = ft["index"]
- unit = None
- min = None
- max = None
- maxlen = None
- desc = None
+ name = str (ft["name"])
+ type = ft["type"]
+ access = ft["access"]
+ index = ft["index"]
+ optional = ft["optional"]
+ unit = None
+ min = None
+ max = None
+ maxlen = None
+ desc = None
for key, value in ft.items ():
if key == "unit":
@@ -626,7 +721,7 @@ class managementClient:
elif key == "desc":
desc = str (value)
- config = (name, type, unit, desc, access, index, min, max, maxlen)
+ config = (name, type, unit, desc, access, index, min, max, maxlen, optional)
configs.append (config)
for idx in range (instCount):
@@ -689,11 +784,26 @@ class managementClient:
schemaClass['C'] = configs
schemaClass['I'] = insts
schemaClass['M'] = methods
- schemaClass['E'] = events
self.schema[classKey] = schemaClass
if self.schemaCb != None:
- self.schemaCb (ch.context, classKey, configs, insts, methods, events)
+ self.schemaCb (ch.context, classKey, configs, insts, methods, {})
+
+ def parsePresenceMasks(self, codec, schemaClass):
+ """ Generate a list of not-present properties """
+ excludeList = []
+ bit = 0
+ for element in schemaClass['C'][1:]:
+ if element[9] == 1:
+ if bit == 0:
+ mask = codec.read_uint8()
+ bit = 1
+ if (mask & bit) == 0:
+ excludeList.append(element[0])
+ bit = bit * 2
+ if bit == 256:
+ bit = 0
+ return excludeList
def parseContent (self, ch, cls, codec, seq=0):
""" Parse a received content message. """
@@ -702,8 +812,8 @@ class managementClient:
if cls == 'I' and self.instCb == None:
return
- packageName = str (codec.read_str8 ())
- className = str (codec.read_str8 ())
+ packageName = codec.read_str8 ()
+ className = codec.read_str8 ()
hash = codec.read_bin128 ()
classKey = (packageName, className, hash)
@@ -716,21 +826,26 @@ class managementClient:
timestamps.append (codec.read_uint64 ()) # Current Time
timestamps.append (codec.read_uint64 ()) # Create Time
timestamps.append (codec.read_uint64 ()) # Delete Time
-
+ objId = objectId(codec)
schemaClass = self.schema[classKey]
if cls == 'C' or cls == 'B':
- for element in schemaClass['C'][:]:
+ notPresent = self.parsePresenceMasks(codec, schemaClass)
+
+ if cls == 'C' or cls == 'B':
+ row.append(("id", objId))
+ for element in schemaClass['C'][1:]:
tc = element[1]
name = element[0]
- data = self.decodeValue (codec, tc)
- row.append ((name, data))
+ if name in notPresent:
+ row.append((name, None))
+ else:
+ data = self.decodeValue(codec, tc)
+ row.append((name, data))
if cls == 'I' or cls == 'B':
- if cls == 'B':
- start = 1
- else:
- start = 0
- for element in schemaClass['I'][start:]:
+ if cls == 'I':
+ row.append(("id", objId))
+ for element in schemaClass['I'][1:]:
tc = element[1]
name = element[0]
data = self.decodeValue (codec, tc)
@@ -760,12 +875,15 @@ class managementClient:
def method (self, channel, userSequence, objId, classId, methodName, args):
""" Invoke a method on an object """
- codec = Codec (self.spec)
+ codec = Codec ()
sequence = self.seqMgr.reserve ((userSequence, classId, methodName))
self.setHeader (codec, ord ('M'), sequence)
- codec.write_uint64 (objId) # ID of object
+ objId.encode(codec)
+ codec.write_str8 (classId[0])
+ codec.write_str8 (classId[1])
+ codec.write_bin128 (classId[2])
codec.write_str8 (methodName)
- bank = (objId & 0x0000FFFFFF000000) >> 24
+ bank = "%d.%d" % (objId.getBroker(), objId.getBank())
# Encode args according to schema
if classId not in self.schema:
@@ -795,5 +913,5 @@ class managementClient:
packageName = classId[0]
className = classId[1]
- msg = channel.message(codec.encoded, "agent." + str(bank))
+ msg = channel.message(codec.encoded, "agent." + bank)
channel.send ("qpid.management", msg)
diff --git a/python/qpid/managementdata.py b/python/qpid/managementdata.py
index fc9eb391b7..61cb10c134 100644
--- a/python/qpid/managementdata.py
+++ b/python/qpid/managementdata.py
@@ -19,11 +19,19 @@
# under the License.
#
+
+###############################################################################
+## This file is being obsoleted by qmf/console.py
+###############################################################################
+
import qpid
import re
import socket
import struct
import os
+import platform
+import locale
+from qpid.connection import Timeout
from qpid.management import managementChannel, managementClient
from threading import Lock
from disp import Display
@@ -40,9 +48,11 @@ class Broker:
if not match: raise ValueError("'%s' is not a valid broker url" % (text))
user, password, host, port = match.groups()
- self.host = socket.gethostbyname (host)
if port: self.port = int(port)
else: self.port = 5672
+ for addr in socket.getaddrinfo(host, self.port):
+ if addr[0] == socket.AF_INET:
+ self.host = addr[4][0]
self.username = user or "guest"
self.password = password or "guest"
@@ -71,14 +81,14 @@ class ManagementData:
#
def registerObjId (self, objId):
- if not objId in self.idBackMap:
- self.idBackMap[objId] = self.nextId
+ if not objId.index() in self.idBackMap:
+ self.idBackMap[objId.index()] = self.nextId
self.idMap[self.nextId] = objId
self.nextId += 1
- def displayObjId (self, objId):
- if objId in self.idBackMap:
- return self.idBackMap[objId]
+ def displayObjId (self, objIdIndex):
+ if objIdIndex in self.idBackMap:
+ return self.idBackMap[objIdIndex]
else:
return 0
@@ -86,11 +96,16 @@ class ManagementData:
if displayId in self.idMap:
return self.idMap[displayId]
else:
- return 0
+ return None
def displayClassName (self, cls):
(packageName, className, hash) = cls
- return packageName + "." + className
+ rev = self.schema[cls][4]
+ if rev == 0:
+ suffix = ""
+ else:
+ suffix = ".%d" % rev
+ return packageName + ":" + className + suffix
def dataHandler (self, context, className, list, timestamps):
""" Callback for configuration and instrumentation data updates """
@@ -102,19 +117,20 @@ class ManagementData:
self.tables[className] = {}
# Register the ID so a more friendly presentation can be displayed
- id = long (list[0][1])
- self.registerObjId (id)
+ objId = list[0][1]
+ oidx = objId.index()
+ self.registerObjId (objId)
# If this object hasn't been seen before, create a new object record with
# the timestamps and empty lists for configuration and instrumentation data.
- if id not in self.tables[className]:
- self.tables[className][id] = (timestamps, [], [])
+ if oidx not in self.tables[className]:
+ self.tables[className][oidx] = (timestamps, [], [])
- (unused, oldConf, oldInst) = self.tables[className][id]
+ (unused, oldConf, oldInst) = self.tables[className][oidx]
# For config updates, simply replace old config list with the new one.
if context == 0: #config
- self.tables[className][id] = (timestamps, list, oldInst)
+ self.tables[className][oidx] = (timestamps, list, oldInst)
# For instrumentation updates, carry the minimum and maximum values for
# "hi-lo" stats forward.
@@ -132,7 +148,7 @@ class ManagementData:
if oldInst[idx][1] < value:
value = oldInst[idx][1]
newInst.append ((key, value))
- self.tables[className][id] = (timestamps, oldConf, newInst)
+ self.tables[className][oidx] = (timestamps, oldConf, newInst)
finally:
self.lock.release ()
@@ -190,15 +206,25 @@ class ManagementData:
self.lastUnit = None
self.methodSeq = 1
self.methodsPending = {}
- self.sessionId = "%s.%d" % (os.uname()[1], os.getpid())
+ self.sessionId = "%s.%d" % (platform.uname()[1], os.getpid())
self.broker = Broker (host)
- self.conn = Connection (connect (self.broker.host, self.broker.port),
+ sock = connect (self.broker.host, self.broker.port)
+ oldTimeout = sock.gettimeout()
+ sock.settimeout(10)
+ self.conn = Connection (sock,
username=self.broker.username, password=self.broker.password)
- self.spec = self.conn.spec
+ def aborted():
+ raise Timeout("Waiting for connection to be established with broker")
+ oldAborted = self.conn.aborted
+ self.conn.aborted = aborted
+
self.conn.start ()
- self.mclient = managementClient (self.spec, self.ctrlHandler, self.configHandler,
+ sock.settimeout(oldTimeout)
+ self.conn.aborted = oldAborted
+
+ self.mclient = managementClient ("unused", self.ctrlHandler, self.configHandler,
self.instHandler, self.methodReply, self.closeHandler)
self.mclient.schemaListener (self.schemaHandler)
self.mch = self.mclient.addChannel (self.conn.session(self.sessionId))
@@ -211,11 +237,13 @@ class ManagementData:
pass
def refName (self, oid):
- if oid == 0:
+ if oid == None:
return "NULL"
- return str (self.displayObjId (oid))
+ return str (self.displayObjId (oid.index()))
def valueDisplay (self, classKey, key, value):
+ if value == None:
+ return "<NULL>"
for kind in range (2):
schema = self.schema[classKey][kind]
for item in schema:
@@ -248,7 +276,7 @@ class ManagementData:
else:
return "True"
elif typecode == 14:
- return "%08x-%04x-%04x-%04x-%04x%08x" % struct.unpack ("!LHHHHL", value)
+ return str (value)
elif typecode == 15:
return str (value)
return "*type-error*"
@@ -267,14 +295,21 @@ class ManagementData:
return result
def getClassKey (self, className):
- dotPos = className.find(".")
- if dotPos == -1:
+ delimPos = className.find(":")
+ if delimPos == -1:
+ schemaRev = 0
+ delim = className.find(".")
+ if delim != -1:
+ schemaRev = int(className[delim + 1:])
+ name = className[0:delim]
+ else:
+ name = className
for key in self.schema:
- if key[1] == className:
+ if key[1] == name and self.schema[key][4] == schemaRev:
return key
else:
- package = className[0:dotPos]
- name = className[dotPos + 1:]
+ package = className[0:delimPos]
+ name = className[delimPos + 1:]
schemaRev = 0
delim = name.find(".")
if delim != -1:
@@ -338,6 +373,12 @@ class ManagementData:
return "int32"
elif typecode == 19:
return "int64"
+ elif typecode == 20:
+ return "object"
+ elif typecode == 21:
+ return "list"
+ elif typecode == 22:
+ return "array"
else:
raise ValueError ("Invalid type code: %d" % typecode)
@@ -437,7 +478,7 @@ class ManagementData:
if classKey in self.tables:
ids = self.listOfIds(classKey, tokens[1:])
for objId in ids:
- (ts, config, inst) = self.tables[classKey][self.rawObjId(objId)]
+ (ts, config, inst) = self.tables[classKey][self.rawObjId(objId).index()]
createTime = self.disp.timestamp (ts[1])
destroyTime = "-"
if ts[2] > 0:
@@ -445,7 +486,7 @@ class ManagementData:
objIndex = self.getObjIndex (classKey, config)
row = (objId, createTime, destroyTime, objIndex)
rows.append (row)
- self.disp.table ("Objects of type %s.%s" % (classKey[0], classKey[1]),
+ self.disp.table ("Objects of type %s" % self.displayClassName(classKey),
("ID", "Created", "Destroyed", "Index"),
rows)
finally:
@@ -486,33 +527,33 @@ class ManagementData:
rows = []
timestamp = None
- config = self.tables[classKey][ids[0]][1]
+ config = self.tables[classKey][ids[0].index()][1]
for eIdx in range (len (config)):
key = config[eIdx][0]
if key != "id":
row = ("property", key)
for id in ids:
if timestamp == None or \
- timestamp < self.tables[classKey][id][0][0]:
- timestamp = self.tables[classKey][id][0][0]
- (key, value) = self.tables[classKey][id][1][eIdx]
+ timestamp < self.tables[classKey][id.index()][0][0]:
+ timestamp = self.tables[classKey][id.index()][0][0]
+ (key, value) = self.tables[classKey][id.index()][1][eIdx]
row = row + (self.valueDisplay (classKey, key, value),)
rows.append (row)
- inst = self.tables[classKey][ids[0]][2]
+ inst = self.tables[classKey][ids[0].index()][2]
for eIdx in range (len (inst)):
key = inst[eIdx][0]
if key != "id":
row = ("statistic", key)
for id in ids:
- (key, value) = self.tables[classKey][id][2][eIdx]
+ (key, value) = self.tables[classKey][id.index()][2][eIdx]
row = row + (self.valueDisplay (classKey, key, value),)
rows.append (row)
titleRow = ("Type", "Element")
for id in ids:
- titleRow = titleRow + (self.refName (id),)
- caption = "Object of type %s.%s:" % (classKey[0], classKey[1])
+ titleRow = titleRow + (self.refName(id),)
+ caption = "Object of type %s:" % self.displayClassName(classKey)
if timestamp != None:
caption = caption + " (last sample time: " + self.disp.timestamp (timestamp) + ")"
self.disp.table (caption, titleRow, rows)
@@ -530,15 +571,11 @@ class ManagementData:
sorted.sort ()
for classKey in sorted:
tuple = self.schema[classKey]
- if tuple[4] == 0:
- suffix = ""
- else:
- suffix = ".%d" % tuple[4]
- className = classKey[0] + "." + classKey[1] + suffix
- row = (className, len (tuple[0]), len (tuple[1]), len (tuple[2]), len (tuple[3]))
+ row = (self.displayClassName(classKey), len (tuple[0]), len (tuple[1]),
+ len (tuple[2]))
rows.append (row)
self.disp.table ("Classes in Schema:",
- ("Class", "Properties", "Statistics", "Methods", "Events"),
+ ("Class", "Properties", "Statistics", "Methods"),
rows)
finally:
self.lock.release ()
@@ -563,13 +600,15 @@ class ManagementData:
access = self.accessName (config[4])
extra = ""
if config[5] == 1:
- extra = extra + "index "
+ extra += "index "
if config[6] != None:
- extra = extra + "Min: " + str (config[6])
+ extra += "Min: " + str(config[6]) + " "
if config[7] != None:
- extra = extra + "Max: " + str (config[7])
+ extra += "Max: " + str(config[7]) + " "
if config[8] != None:
- extra = extra + "MaxLen: " + str (config[8])
+ extra += "MaxLen: " + str(config[8]) + " "
+ if config[9] == 1:
+ extra += "optional "
rows.append ((name, typename, unit, access, extra, desc))
for config in self.schema[classKey][1]:
@@ -581,7 +620,7 @@ class ManagementData:
rows.append ((name, typename, unit, "", "", desc))
titles = ("Element", "Type", "Unit", "Access", "Notes", "Description")
- self.disp.table ("Schema for class '%s.%s.%d':" % (classKey[0], classKey[1], schemaRev), titles, rows)
+ self.disp.table ("Schema for class '%s':" % self.displayClassName(classKey), titles, rows)
for mname in self.schema[classKey][2]:
(mdesc, args) = self.schema[classKey][2][mname]
@@ -606,14 +645,14 @@ class ManagementData:
titles = ("Argument", "Type", "Direction", "Unit", "Notes", "Description")
self.disp.table (caption, titles, rows)
- except:
+ except Exception, e:
pass
self.lock.release ()
def getClassForId (self, objId):
""" Given an object ID, return the class key for the referenced object """
for classKey in self.tables:
- if objId in self.tables[classKey]:
+ if objId.index() in self.tables[classKey]:
return classKey
return None
@@ -626,7 +665,7 @@ class ManagementData:
raise ValueError ()
if methodName not in self.schema[classKey][2]:
- print "Method '%s' not valid for class '%s.%s'" % (methodName, classKey[0], classKey[1])
+ print "Method '%s' not valid for class '%s'" % (methodName, self.displayClassName(classKey))
raise ValueError ()
schemaMethod = self.schema[classKey][2][methodName]
@@ -647,7 +686,7 @@ class ManagementData:
self.methodSeq = self.methodSeq + 1
self.methodsPending[self.methodSeq] = methodName
- except:
+ except Exception, e:
methodOk = False
self.lock.release ()
if methodOk:
@@ -659,14 +698,19 @@ class ManagementData:
def makeIdRow (self, displayId):
if displayId in self.idMap:
- rawId = self.idMap[displayId]
+ objId = self.idMap[displayId]
else:
return None
- return (displayId,
- rawId,
- (rawId & 0x7FFF000000000000) >> 48,
- (rawId & 0x0000FFFFFF000000) >> 24,
- (rawId & 0x0000000000FFFFFF))
+ if objId.getFlags() == 0:
+ flags = ""
+ else:
+ flags = str(objId.getFlags())
+ seq = objId.getSequence()
+ if seq == 0:
+ seqText = "<durable>"
+ else:
+ seqText = str(seq)
+ return (displayId, flags, seqText, objId.getBroker(), objId.getBank(), hex(objId.getObject()))
def listIds (self, select):
rows = []
@@ -683,7 +727,7 @@ class ManagementData:
return
rows.append(row)
self.disp.table("Translation of Display IDs:",
- ("DisplayID", "RawID", "BootSequence", "Bank", "Object"),
+ ("DisplayID", "Flags", "BootSequence", "Broker", "Bank", "Object"),
rows)
def do_list (self, data):
@@ -704,7 +748,11 @@ class ManagementData:
self.schemaTable (data)
def do_call (self, data):
- tokens = data.split ()
+ encTokens = data.split ()
+ try:
+ tokens = [a.decode(locale.getpreferredencoding()) for a in encTokens]
+ except:
+ tokens = encTokens
if len (tokens) < 2:
print "Not enough arguments supplied"
return
diff --git a/python/qpid/message.py b/python/qpid/message.py
index eb3ef5c03c..4d31da2846 100644
--- a/python/qpid/message.py
+++ b/python/qpid/message.py
@@ -17,7 +17,6 @@
# under the License.
#
from connection08 import Method, Request
-from sets import Set
class Message:
diff --git a/python/qpid/messaging.py b/python/qpid/messaging.py
new file mode 100644
index 0000000000..4f2c190ce2
--- /dev/null
+++ b/python/qpid/messaging.py
@@ -0,0 +1,822 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+A candidate high level messaging API for python.
+
+Areas that still need work:
+
+ - asynchronous send
+ - asynchronous error notification
+ - definition of the arguments for L{Session.sender} and L{Session.receiver}
+ - standard L{Message} properties
+ - L{Message} content encoding
+ - protocol negotiation/multiprotocol impl
+"""
+
+from codec010 import StringCodec
+from concurrency import synchronized, Waiter, Condition
+from datatypes import timestamp, uuid4, Serial
+from logging import getLogger
+from ops import PRIMITIVE
+from threading import Thread, RLock
+from util import default
+
+log = getLogger("qpid.messaging")
+
+static = staticmethod
+
+AMQP_PORT = 5672
+AMQPS_PORT = 5671
+
+class Constant:
+
+ def __init__(self, name, value=None):
+ self.name = name
+ self.value = value
+
+ def __repr__(self):
+ return self.name
+
+UNLIMITED = Constant("UNLIMITED", 0xFFFFFFFFL)
+
+class ConnectionError(Exception):
+ """
+ The base class for all connection related exceptions.
+ """
+ pass
+
+class ConnectError(ConnectionError):
+ """
+ Exception raised when there is an error connecting to the remote
+ peer.
+ """
+ pass
+
+class Connection:
+
+ """
+ A Connection manages a group of L{Sessions<Session>} and connects
+ them with a remote endpoint.
+ """
+
+ @static
+ def open(host, port=None, username="guest", password="guest",
+ mechanism="PLAIN", heartbeat=None, **options):
+ """
+ Creates an AMQP connection and connects it to the given host and port.
+
+ @type host: str
+ @param host: the name or ip address of the remote host
+ @type port: int
+ @param port: the port number of the remote host
+ @rtype: Connection
+ @return: a connected Connection
+ """
+ conn = Connection(host, port, username, password, mechanism, heartbeat, **options)
+ conn.connect()
+ return conn
+
+ def __init__(self, host, port=None, username="guest", password="guest",
+ mechanism="PLAIN", heartbeat=None, **options):
+ """
+ Creates a connection. A newly created connection must be connected
+ with the Connection.connect() method before it can be used.
+
+ @type host: str
+ @param host: the name or ip address of the remote host
+ @type port: int
+ @param port: the port number of the remote host
+ @rtype: Connection
+ @return: a disconnected Connection
+ """
+ self.host = host
+ self.port = default(port, AMQP_PORT)
+ self.username = username
+ self.password = password
+ self.mechanism = mechanism
+ self.heartbeat = heartbeat
+
+ self.id = str(uuid4())
+ self.session_counter = 0
+ self.sessions = {}
+ self.reconnect = options.get("reconnect", False)
+ self._connected = False
+ self._lock = RLock()
+ self._condition = Condition(self._lock)
+ self._waiter = Waiter(self._condition)
+ self._modcount = Serial(0)
+ self.error = None
+ from driver import Driver
+ self._driver = Driver(self)
+ self._driver.start()
+
+ def _wait(self, predicate, timeout=None):
+ return self._waiter.wait(predicate, timeout=timeout)
+
+ def _wakeup(self):
+ self._modcount += 1
+ self._driver.wakeup()
+
+ def _check_error(self, exc=ConnectionError):
+ if self.error:
+ raise exc(*self.error)
+
+ def _ewait(self, predicate, timeout=None, exc=ConnectionError):
+ result = self._wait(lambda: self.error or predicate(), timeout)
+ self._check_error(exc)
+ return result
+
+ @synchronized
+ def session(self, name=None, transactional=False):
+ """
+ Creates or retrieves the named session. If the name is omitted or
+ None, then a unique name is chosen based on a randomly generated
+ uuid.
+
+ @type name: str
+ @param name: the session name
+ @rtype: Session
+ @return: the named Session
+ """
+
+ if name is None:
+ name = "%s:%s" % (self.id, self.session_counter)
+ self.session_counter += 1
+ else:
+ name = "%s:%s" % (self.id, name)
+
+ if self.sessions.has_key(name):
+ return self.sessions[name]
+ else:
+ ssn = Session(self, name, transactional)
+ self.sessions[name] = ssn
+ self._wakeup()
+ return ssn
+
+ @synchronized
+ def _remove_session(self, ssn):
+ del self.sessions[ssn.name]
+
+ @synchronized
+ def connect(self):
+ """
+ Connect to the remote endpoint.
+ """
+ self._connected = True
+ self._wakeup()
+ self._ewait(lambda: self._driver._connected, exc=ConnectError)
+
+ @synchronized
+ def disconnect(self):
+ """
+ Disconnect from the remote endpoint.
+ """
+ self._connected = False
+ self._wakeup()
+ self._ewait(lambda: not self._driver._connected)
+
+ @synchronized
+ def connected(self):
+ """
+ Return true if the connection is connected, false otherwise.
+ """
+ return self._connected
+
+ @synchronized
+ def close(self):
+ """
+ Close the connection and all sessions.
+ """
+ for ssn in self.sessions.values():
+ ssn.close()
+ self.disconnect()
+
+class Pattern:
+ """
+ The pattern filter matches the supplied wildcard pattern against a
+ message subject.
+ """
+
+ def __init__(self, value):
+ self.value = value
+
+ # XXX: this should become part of the driver
+ def _bind(self, sst, exchange, queue):
+ from qpid.ops import ExchangeBind
+ sst.write_cmd(ExchangeBind(exchange=exchange, queue=queue,
+ binding_key=self.value.replace("*", "#")))
+
+class SessionError(Exception):
+ pass
+
+class Disconnected(SessionError):
+ """
+ Exception raised when an operation is attempted that is illegal when
+ disconnected.
+ """
+ pass
+
+class NontransactionalSession(SessionError):
+ """
+ Exception raised when commit or rollback is attempted on a
+ non-transactional session.
+ """
+ pass
+
+class TransactionAborted(SessionError):
+ pass
+
+class Session:
+
+ """
+ Sessions provide a linear context for sending and receiving
+ messages, and manage various Senders and Receivers.
+ """
+
+ def __init__(self, connection, name, transactional):
+ self.connection = connection
+ self.name = name
+
+ self.transactional = transactional
+
+ self.committing = False
+ self.committed = True
+ self.aborting = False
+ self.aborted = False
+
+ self.senders = []
+ self.receivers = []
+ self.outgoing = []
+ self.incoming = []
+ self.unacked = []
+ self.acked = []
+ # XXX: I hate this name.
+ self.ack_capacity = UNLIMITED
+
+ self.error = None
+ self.closing = False
+ self.closed = False
+
+ self._lock = connection._lock
+
+ def __repr__(self):
+ return "<Session %s>" % self.name
+
+ def _wait(self, predicate, timeout=None):
+ return self.connection._wait(predicate, timeout=timeout)
+
+ def _wakeup(self):
+ self.connection._wakeup()
+
+ def _check_error(self, exc=SessionError):
+ self.connection._check_error(exc)
+ if self.error:
+ raise exc(*self.error)
+
+ def _ewait(self, predicate, timeout=None, exc=SessionError):
+ result = self.connection._ewait(lambda: self.error or predicate(), timeout, exc)
+ self._check_error(exc)
+ return result
+
+ @synchronized
+ def sender(self, target, **options):
+ """
+ Creates a L{Sender} that may be used to send L{Messages<Message>}
+ to the specified target.
+
+ @type target: str
+ @param target: the target to which messages will be sent
+ @rtype: Sender
+ @return: a new Sender for the specified target
+ """
+ sender = Sender(self, len(self.senders), target, options)
+ self.senders.append(sender)
+ self._wakeup()
+ # XXX: because of the lack of waiting here we can end up getting
+ # into the driver loop with messages sent for senders that haven't
+ # been linked yet, something similar can probably happen for
+ # receivers
+ return sender
+
+ @synchronized
+ def receiver(self, source, **options):
+ """
+ Creates a receiver that may be used to fetch L{Messages<Message>}
+ from the specified source.
+
+ @type source: str
+ @param source: the source of L{Messages<Message>}
+ @rtype: Receiver
+ @return: a new Receiver for the specified source
+ """
+ receiver = Receiver(self, len(self.receivers), source, options)
+ self.receivers.append(receiver)
+ self._wakeup()
+ return receiver
+
+ @synchronized
+ def _count(self, predicate):
+ result = 0
+ for msg in self.incoming:
+ if predicate(msg):
+ result += 1
+ return result
+
+ def _peek(self, predicate):
+ for msg in self.incoming:
+ if predicate(msg):
+ return msg
+
+ def _pop(self, predicate):
+ i = 0
+ while i < len(self.incoming):
+ msg = self.incoming[i]
+ if predicate(msg):
+ del self.incoming[i]
+ return msg
+ else:
+ i += 1
+
+ @synchronized
+ def _get(self, predicate, timeout=None):
+ if self._ewait(lambda: ((self._peek(predicate) is not None) or self.closing),
+ timeout):
+ msg = self._pop(predicate)
+ if msg is not None:
+ msg._receiver.returned += 1
+ self.unacked.append(msg)
+ log.debug("RETR [%s] %s", self, msg)
+ return msg
+ return None
+
+ @synchronized
+ def next_receiver(self, timeout=None):
+ if self._ewait(lambda: self.incoming, timeout):
+ return self.incoming[0]._receiver
+ else:
+ raise Empty
+
+ @synchronized
+ def acknowledge(self, message=None, sync=True):
+ """
+ Acknowledge the given L{Message}. If message is None, then all
+ unacknowledged messages on the session are acknowledged.
+
+ @type message: Message
+ @param message: the message to acknowledge or None
+ @type sync: boolean
+ @param sync: if true then block until the message(s) are acknowledged
+ """
+ if message is None:
+ messages = self.unacked[:]
+ else:
+ messages = [message]
+
+ for m in messages:
+ if self.ack_capacity is not UNLIMITED:
+ if self.ack_capacity <= 0:
+ # XXX: this is currently a SendError, maybe it should be a SessionError?
+ raise InsufficientCapacity("ack_capacity = %s" % self.ack_capacity)
+ self._wakeup()
+ self._ewait(lambda: len(self.acked) < self.ack_capacity)
+ self.unacked.remove(m)
+ self.acked.append(m)
+
+ self._wakeup()
+ if sync:
+ self._ewait(lambda: not [m for m in messages if m in self.acked])
+
+ @synchronized
+ def commit(self):
+ """
+ Commit outstanding transactional work. This consists of all
+ message sends and receives since the prior commit or rollback.
+ """
+ if not self.transactional:
+ raise NontransactionalSession()
+ self.committing = True
+ self._wakeup()
+ self._ewait(lambda: not self.committing)
+ if self.aborted:
+ raise TransactionAborted()
+ assert self.committed
+
+ @synchronized
+ def rollback(self):
+ """
+ Rollback outstanding transactional work. This consists of all
+ message sends and receives since the prior commit or rollback.
+ """
+ if not self.transactional:
+ raise NontransactionalSession()
+ self.aborting = True
+ self._wakeup()
+ self._ewait(lambda: not self.aborting)
+ assert self.aborted
+
+ @synchronized
+ def close(self):
+ """
+ Close the session.
+ """
+ # XXX: should be able to express this condition through API calls
+ self._ewait(lambda: not self.outgoing and not self.acked)
+
+ for link in self.receivers + self.senders:
+ link.close()
+
+ self.closing = True
+ self._wakeup()
+ self._ewait(lambda: self.closed)
+ self.connection._remove_session(self)
+
+class SendError(SessionError):
+ pass
+
+class InsufficientCapacity(SendError):
+ pass
+
+class Sender:
+
+ """
+ Sends outgoing messages.
+ """
+
+ def __init__(self, session, index, target, options):
+ self.session = session
+ self.index = index
+ self.target = target
+ self.options = options
+ self.capacity = options.get("capacity", UNLIMITED)
+ self.durable = options.get("durable")
+ self.queued = Serial(0)
+ self.acked = Serial(0)
+ self.error = None
+ self.linked = False
+ self.closing = False
+ self.closed = False
+ self._lock = self.session._lock
+
+ def _wakeup(self):
+ self.session._wakeup()
+
+ def _check_error(self, exc=SendError):
+ self.session._check_error(exc)
+ if self.error:
+ raise exc(*self.error)
+
+ def _ewait(self, predicate, timeout=None, exc=SendError):
+ result = self.session._ewait(lambda: self.error or predicate(), timeout, exc)
+ self._check_error(exc)
+ return result
+
+ @synchronized
+ def pending(self):
+ """
+ Returns the number of messages awaiting acknowledgment.
+ @rtype: int
+ @return: the number of unacknowledged messages
+ """
+ return self.queued - self.acked
+
+ @synchronized
+ def send(self, object, sync=True, timeout=None):
+ """
+ Send a message. If the object passed in is of type L{unicode},
+ L{str}, L{list}, or L{dict}, it will automatically be wrapped in a
+ L{Message} and sent. If it is of type L{Message}, it will be sent
+ directly. If the sender capacity is not L{UNLIMITED} then send
+ will block until there is available capacity to send the message.
+ If the timeout parameter is specified, then send will throw an
+ L{InsufficientCapacity} exception if capacity does not become
+ available within the specified time.
+
+ @type object: unicode, str, list, dict, Message
+ @param object: the message or content to send
+
+ @type sync: boolean
+ @param sync: if true then block until the message is sent
+
+ @type timeout: float
+ @param timeout: the time to wait for available capacity
+ """
+
+ if not self.session.connection._connected or self.session.closing:
+ raise Disconnected()
+
+ self._ewait(lambda: self.linked)
+
+ if isinstance(object, Message):
+ message = object
+ else:
+ message = Message(object)
+
+ if message.durable is None:
+ message.durable = self.durable
+
+ if self.capacity is not UNLIMITED:
+ if self.capacity <= 0:
+ raise InsufficientCapacity("capacity = %s" % self.capacity)
+ if not self._ewait(lambda: self.pending() < self.capacity, timeout=timeout):
+ raise InsufficientCapacity("capacity = %s" % self.capacity)
+
+ # XXX: what if we send the same message to multiple senders?
+ message._sender = self
+ self.session.outgoing.append(message)
+ self.queued += 1
+
+ self._wakeup()
+
+ if sync:
+ self.sync()
+ assert message not in self.session.outgoing
+
+ @synchronized
+ def sync(self):
+ mno = self.queued
+ self._ewait(lambda: self.acked >= mno)
+
+ @synchronized
+ def close(self):
+ """
+ Close the Sender.
+ """
+ self.closing = True
+ self._wakeup()
+ try:
+ self.session._ewait(lambda: self.closed)
+ finally:
+ self.session.senders.remove(self)
+
+class ReceiveError(SessionError):
+ pass
+
+class Empty(ReceiveError):
+ """
+ Exception raised by L{Receiver.fetch} when there is no message
+ available within the allotted time.
+ """
+ pass
+
+class Receiver(object):
+
+ """
+ Receives incoming messages from a remote source. Messages may be
+ fetched with L{fetch}.
+ """
+
+ def __init__(self, session, index, source, options):
+ self.session = session
+ self.index = index
+ self.destination = str(self.index)
+ self.source = source
+ self.options = options
+
+ self.granted = Serial(0)
+ self.draining = False
+ self.impending = Serial(0)
+ self.received = Serial(0)
+ self.returned = Serial(0)
+
+ self.error = None
+ self.linked = False
+ self.closing = False
+ self.closed = False
+ self._lock = self.session._lock
+ self._capacity = 0
+ self._set_capacity(options.get("capacity", 0), False)
+
+ @synchronized
+ def _set_capacity(self, c, wakeup=True):
+ if c is UNLIMITED:
+ self._capacity = c.value
+ else:
+ self._capacity = c
+ self._grant()
+ if wakeup:
+ self._wakeup()
+
+ def _get_capacity(self):
+ if self._capacity == UNLIMITED.value:
+ return UNLIMITED
+ else:
+ return self._capacity
+
+ capacity = property(_get_capacity, _set_capacity)
+
+ def _wakeup(self):
+ self.session._wakeup()
+
+ def _check_error(self, exc=ReceiveError):
+ self.session._check_error(exc)
+ if self.error:
+ raise exc(*self.error)
+
+ def _ewait(self, predicate, timeout=None, exc=ReceiveError):
+ result = self.session._ewait(lambda: self.error or predicate(), timeout, exc)
+ self._check_error(exc)
+ return result
+
+ @synchronized
+ def pending(self):
+ """
+ Returns the number of messages available to be fetched by the
+ application.
+
+ @rtype: int
+ @return: the number of available messages
+ """
+ return self.received - self.returned
+
+ def _pred(self, msg):
+ return msg._receiver == self
+
+ @synchronized
+ def fetch(self, timeout=None):
+ """
+ Fetch and return a single message. A timeout of None will block
+ forever waiting for a message to arrive, a timeout of zero will
+ return immediately if no messages are available.
+
+ @type timeout: float
+ @param timeout: the time to wait for a message to be available
+ """
+
+ self._ewait(lambda: self.linked)
+
+ if self._capacity == 0:
+ self.granted = self.returned + 1
+ self._wakeup()
+ self._ewait(lambda: self.impending >= self.granted)
+ msg = self.session._get(self._pred, timeout=timeout)
+ if msg is None:
+ self.draining = True
+ self._wakeup()
+ self._ewait(lambda: not self.draining)
+ self._grant()
+ self._wakeup()
+ msg = self.session._get(self._pred, timeout=0)
+ if msg is None:
+ raise Empty()
+ elif self._capacity not in (0, UNLIMITED.value):
+ self.granted += 1
+ self._wakeup()
+ return msg
+
+ def _grant(self):
+ if self._capacity == UNLIMITED.value:
+ self.granted = UNLIMITED
+ else:
+ self.granted = self.received + self._capacity
+
+ @synchronized
+ def close(self):
+ """
+ Close the receiver.
+ """
+ self.closing = True
+ self._wakeup()
+ try:
+ self.session._ewait(lambda: self.closed)
+ finally:
+ self.session.receivers.remove(self)
+
+def codec(name):
+ type = PRIMITIVE[name]
+
+ def encode(x):
+ sc = StringCodec()
+ sc.write_primitive(type, x)
+ return sc.encoded
+
+ def decode(x):
+ sc = StringCodec(x)
+ return sc.read_primitive(type)
+
+ return encode, decode
+
+# XXX: need to correctly parse the mime type and deal with
+# content-encoding header
+
+TYPE_MAPPINGS={
+ dict: "amqp/map",
+ list: "amqp/list",
+ unicode: "text/plain; charset=utf8",
+ unicode: "text/plain",
+ buffer: None,
+ str: None,
+ None.__class__: None
+ }
+
+TYPE_CODEC={
+ "amqp/map": codec("map"),
+ "amqp/list": codec("list"),
+ "text/plain; charset=utf8": (lambda x: x.encode("utf8"), lambda x: x.decode("utf8")),
+ "text/plain": (lambda x: x.encode("utf8"), lambda x: x.decode("utf8")),
+ "": (lambda x: x, lambda x: x),
+ None: (lambda x: x, lambda x: x)
+ }
+
+def get_type(content):
+ return TYPE_MAPPINGS[content.__class__]
+
+def get_codec(content_type):
+ return TYPE_CODEC[content_type]
+
+UNSPECIFIED = object()
+
+class Message:
+
+ """
+ A message consists of a standard set of fields, an application
+ defined set of properties, and some content.
+
+ @type id: str
+ @ivar id: the message id
+ @type user_id: ???
+ @ivar user_id: the user-id of the message producer
+ @type to: ???
+ @ivar to: ???
+ @type reply_to: ???
+ @ivar reply_to: ???
+ @type correlation_id: str
+ @ivar correlation_id: a correlation-id for the message
+ @type properties: dict
+ @ivar properties: application specific message properties
+ @type content_type: str
+ @ivar content_type: the content-type of the message
+ @type content: str, unicode, buffer, dict, list
+ @ivar content: the message content
+ """
+
+ def __init__(self, content=None, content_type=UNSPECIFIED, id=None,
+ subject=None, to=None, user_id=None, reply_to=None,
+ correlation_id=None, durable=None, properties=None):
+ """
+ Construct a new message with the supplied content. The
+ content-type of the message will be automatically inferred from
+ the type of the content parameter.
+
+ @type content: str, unicode, buffer, dict, list
+ @param content: the message content
+
+ @type content_type: str
+ @param content_type: the content-type of the message
+ """
+ self.id = id
+ self.subject = subject
+ self.to = to
+ self.user_id = user_id
+ self.reply_to = reply_to
+ self.correlation_id = correlation_id
+ self.durable = durable
+ self.redelivered = False
+ if properties is None:
+ self.properties = {}
+ else:
+ self.properties = properties
+ if content_type is UNSPECIFIED:
+ self.content_type = get_type(content)
+ else:
+ self.content_type = content_type
+ self.content = content
+
+ def __repr__(self):
+ args = []
+ for name in ["id", "subject", "to", "user_id", "reply_to",
+ "correlation_id"]:
+ value = self.__dict__[name]
+ if value is not None: args.append("%s=%r" % (name, value))
+ for name in ["durable", "properties"]:
+ value = self.__dict__[name]
+ if value: args.append("%s=%r" % (name, value))
+ if self.content_type != get_type(self.content):
+ args.append("content_type=%r" % self.content_type)
+ if self.content is not None:
+ if args:
+ args.append("content=%r" % self.content)
+ else:
+ args.append(repr(self.content))
+ return "Message(%s)" % ", ".join(args)
+
+__all__ = ["Connection", "Session", "Sender", "Receiver", "Pattern", "Message",
+ "ConnectionError", "ConnectError", "SessionError", "Disconnected",
+ "SendError", "InsufficientCapacity", "ReceiveError", "Empty",
+ "timestamp", "uuid4", "UNLIMITED", "AMQP_PORT", "AMQPS_PORT"]
diff --git a/python/qpid/mimetype.py b/python/qpid/mimetype.py
new file mode 100644
index 0000000000..f512996b9f
--- /dev/null
+++ b/python/qpid/mimetype.py
@@ -0,0 +1,106 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import re, rfc822
+from lexer import Lexicon, LexError
+from parser import Parser, ParseError
+
+l = Lexicon()
+
+LPAREN = l.define("LPAREN", r"\(")
+RPAREN = l.define("LPAREN", r"\)")
+SLASH = l.define("SLASH", r"/")
+SEMI = l.define("SEMI", r";")
+EQUAL = l.define("EQUAL", r"=")
+TOKEN = l.define("TOKEN", r'[^()<>@,;:\\"/\[\]?= ]+')
+STRING = l.define("STRING", r'"(?:[^\\"]|\\.)*"')
+WSPACE = l.define("WSPACE", r"[ \n\r\t]+")
+EOF = l.eof("EOF")
+
+LEXER = l.compile()
+
+def lex(st):
+ return LEXER.lex(st)
+
+class MimeTypeParser(Parser):
+
+ def __init__(self, tokens):
+ Parser.__init__(self, [t for t in tokens if t.type is not WSPACE])
+
+ def parse(self):
+ result = self.mimetype()
+ self.eat(EOF)
+ return result
+
+ def mimetype(self):
+ self.remove_comments()
+ self.reset()
+
+ type = self.eat(TOKEN).value.lower()
+ self.eat(SLASH)
+ subtype = self.eat(TOKEN).value.lower()
+
+ params = []
+ while True:
+ if self.matches(SEMI):
+ params.append(self.parameter())
+ else:
+ break
+
+ return type, subtype, params
+
+ def remove_comments(self):
+ while True:
+ self.eat_until(LPAREN, EOF)
+ if self.matches(LPAREN):
+ self.remove(*self.comment())
+ else:
+ break
+
+ def comment(self):
+ start = self.eat(LPAREN)
+
+ while True:
+ self.eat_until(LPAREN, RPAREN)
+ if self.matches(LPAREN):
+ self.comment()
+ else:
+ break
+
+ end = self.eat(RPAREN)
+ return start, end
+
+ def parameter(self):
+ self.eat(SEMI)
+ name = self.eat(TOKEN).value
+ self.eat(EQUAL)
+ value = self.value()
+ return name, value
+
+ def value(self):
+ if self.matches(TOKEN):
+ return self.eat().value
+ elif self.matches(STRING):
+ return rfc822.unquote(self.eat().value)
+ else:
+ raise ParseError(self.next(), TOKEN, STRING)
+
+def parse(addr):
+ return MimeTypeParser(lex(addr)).parse()
+
+__all__ = ["parse", "ParseError"]
diff --git a/python/qpid/ops.py b/python/qpid/ops.py
new file mode 100644
index 0000000000..a8ba826857
--- /dev/null
+++ b/python/qpid/ops.py
@@ -0,0 +1,280 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import os, mllib, cPickle as pickle
+from util import fill
+
+class Primitive(object):
+ pass
+
+class Enum(object):
+ pass
+
+class Field:
+
+ def __init__(self, name, type, default=None):
+ self.name = name
+ self.type = type
+ self.default = default
+
+ def __repr__(self):
+ return "%s: %s" % (self.name, self.type)
+
+class Compound(object):
+
+ UNENCODED=[]
+
+ def __init__(self, *args, **kwargs):
+ args = list(args)
+ for f in self.ARGS:
+ if args:
+ a = args.pop(0)
+ else:
+ a = kwargs.pop(f.name, f.default)
+ setattr(self, f.name, a)
+ if args:
+ raise TypeError("%s takes at most %s arguments (%s given))" %
+ (self.__class__.__name__, len(self.ARGS),
+ len(self.ARGS) + len(args)))
+ if kwargs:
+ raise TypeError("got unexpected keyword argument '%s'" % kwargs.keys()[0])
+
+ def fields(self):
+ result = {}
+ for f in self.FIELDS:
+ result[f.name] = getattr(self, f.name)
+ return result
+
+ def args(self):
+ result = {}
+ for f in self.ARGS:
+ result[f.name] = getattr(self, f.name)
+ return result
+
+ def __getitem__(self, attr):
+ return getattr(self, attr)
+
+ def __setitem__(self, attr, value):
+ setattr(self, attr, value)
+
+ def dispatch(self, target, *args):
+ handler = "do_%s" % self.NAME
+ getattr(target, handler)(self, *args)
+
+ def __repr__(self, extras=()):
+ return "%s(%s)" % (self.__class__.__name__,
+ ", ".join(["%s=%r" % (f.name, getattr(self, f.name))
+ for f in self.ARGS
+ if getattr(self, f.name) != f.default]))
+
+class Command(Compound):
+ UNENCODED=[Field("channel", "uint16", 0),
+ Field("id", "sequence-no", None),
+ Field("sync", "bit", False),
+ Field("headers", None, None),
+ Field("payload", None, None)]
+
+class Control(Compound):
+ UNENCODED=[Field("channel", "uint16", 0)]
+
+def pythonize(st):
+ if st is None:
+ return None
+ else:
+ return str(st.replace("-", "_"))
+
+def pydoc(op, children=()):
+ doc = "\n\n".join([fill(p.text(), 0) for p in op.query["doc"]])
+ for ch in children:
+ doc += "\n\n " + pythonize(ch["@name"]) + " -- " + str(ch["@label"])
+ ch_descs ="\n\n".join([fill(p.text(), 4) for p in ch.query["doc"]])
+ if ch_descs:
+ doc += "\n\n" + ch_descs
+ return doc
+
+def studly(st):
+ return "".join([p.capitalize() for p in st.split("-")])
+
+def klass(nd):
+ while nd.parent is not None:
+ if hasattr(nd.parent, "name") and nd.parent.name == "class":
+ return nd.parent
+ else:
+ nd = nd.parent
+
+def included(nd):
+ cls = klass(nd)
+ if cls is None:
+ return True
+ else:
+ return cls["@name"] not in ("file", "stream")
+
+def num(s):
+ if s: return int(s, 0)
+
+def code(nd):
+ c = num(nd["@code"])
+ if c is None:
+ return None
+ else:
+ cls = klass(nd)
+ if cls is None:
+ return c
+ else:
+ return c | (num(cls["@code"]) << 8)
+
+def default(f):
+ if f["@type"] == "bit":
+ return False
+ else:
+ return None
+
+def make_compound(decl, base):
+ dict = {}
+ fields = decl.query["field"]
+ dict["__doc__"] = pydoc(decl, fields)
+ dict["NAME"] = pythonize(decl["@name"])
+ dict["SIZE"] = num(decl["@size"])
+ dict["CODE"] = code(decl)
+ dict["PACK"] = num(decl["@pack"])
+ dict["FIELDS"] = [Field(pythonize(f["@name"]), resolve(f), default(f)) for f in fields]
+ dict["ARGS"] = dict["FIELDS"] + base.UNENCODED
+ return str(studly(decl["@name"])), (base,), dict
+
+def make_restricted(decl):
+ name = pythonize(decl["@name"])
+ dict = {}
+ choices = decl.query["choice"]
+ dict["__doc__"] = pydoc(decl, choices)
+ dict["NAME"] = name
+ dict["TYPE"] = str(decl.parent["@type"])
+ values = []
+ for ch in choices:
+ val = int(ch["@value"], 0)
+ dict[pythonize(ch["@name"])] = val
+ values.append(val)
+ dict["VALUES"] = values
+ return name, (Enum,), dict
+
+def make_type(decl):
+ name = pythonize(decl["@name"])
+ dict = {}
+ dict["__doc__"] = pydoc(decl)
+ dict["NAME"] = name
+ dict["CODE"] = code(decl)
+ return str(studly(decl["@name"])), (Primitive,), dict
+
+def make_command(decl):
+ decl.set_attr("name", "%s-%s" % (decl.parent["@name"], decl["@name"]))
+ decl.set_attr("size", "0")
+ decl.set_attr("pack", "2")
+ name, bases, dict = make_compound(decl, Command)
+ dict["RESULT"] = pythonize(decl["result/@type"]) or pythonize(decl["result/struct/@name"])
+ return name, bases, dict
+
+def make_control(decl):
+ decl.set_attr("name", "%s-%s" % (decl.parent["@name"], decl["@name"]))
+ decl.set_attr("size", "0")
+ decl.set_attr("pack", "2")
+ return make_compound(decl, Control)
+
+def make_struct(decl):
+ return make_compound(decl, Compound)
+
+def make_enum(decl):
+ decl.set_attr("name", decl.parent["@name"])
+ return make_restricted(decl)
+
+
+vars = globals()
+
+def make(nd):
+ return vars["make_%s" % nd.name](nd)
+
+from qpid_config import amqp_spec as file
+pclfile = "%s.ops.pcl" % file
+
+if os.path.exists(pclfile) and \
+ os.path.getmtime(pclfile) > os.path.getmtime(file):
+ f = open(pclfile, "r")
+ types = pickle.load(f)
+ f.close()
+else:
+ spec = mllib.xml_parse(file)
+
+ def qualify(nd, field="@name"):
+ cls = klass(nd)
+ if cls is None:
+ return pythonize(nd[field])
+ else:
+ return pythonize("%s.%s" % (cls["@name"], nd[field]))
+
+ domains = dict([(qualify(d), pythonize(d["@type"]))
+ for d in spec.query["amqp/domain", included] + \
+ spec.query["amqp/class/domain", included]])
+
+ def resolve(nd):
+ candidates = qualify(nd, "@type"), pythonize(nd["@type"])
+ for c in candidates:
+ if domains.has_key(c):
+ while domains.has_key(c):
+ c = domains[c]
+ return c
+ else:
+ return c
+
+ type_decls = \
+ spec.query["amqp/class/command", included] + \
+ spec.query["amqp/class/control", included] + \
+ spec.query["amqp/class/command/result/struct", included] + \
+ spec.query["amqp/class/struct", included] + \
+ spec.query["amqp/class/domain/enum", included] + \
+ spec.query["amqp/domain/enum", included] + \
+ spec.query["amqp/type"]
+ types = [make(nd) for nd in type_decls]
+
+ if os.access(os.path.dirname(os.path.abspath(pclfile)), os.W_OK):
+ f = open(pclfile, "w")
+ pickle.dump(types, f)
+ f.close()
+
+ENUMS = {}
+PRIMITIVE = {}
+COMPOUND = {}
+COMMANDS = {}
+CONTROLS = {}
+
+for name, bases, _dict in types:
+ t = type(name, bases, _dict)
+ vars[name] = t
+
+ if issubclass(t, Command):
+ COMMANDS[t.NAME] = t
+ COMMANDS[t.CODE] = t
+ elif issubclass(t, Control):
+ CONTROLS[t.NAME] = t
+ CONTROLS[t.CODE] = t
+ elif issubclass(t, Compound):
+ COMPOUND[t.NAME] = t
+ if t.CODE is not None:
+ COMPOUND[t.CODE] = t
+ elif issubclass(t, Primitive):
+ PRIMITIVE[t.NAME] = t
+ PRIMITIVE[t.CODE] = t
+ elif issubclass(t, Enum):
+ ENUMS[t.NAME] = t
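
(A rough illustration of the classes qpid.ops generates from the AMQP 0-10 spec; it assumes the spec file referenced by qpid_config is present, and the field values are made up.)

from qpid.ops import ExchangeBind, COMMANDS

bind = ExchangeBind(exchange="amq.topic", queue="my-queue", binding_key="news.#")
print bind                                        # repr() shows the non-default fields
print bind["queue"]                               # fields are also reachable via __getitem__
print COMMANDS["exchange_bind"] is ExchangeBind   # commands are registered by pythonized name and code
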
diff --git a/python/qpid/parser.py b/python/qpid/parser.py
new file mode 100644
index 0000000000..233f0a8469
--- /dev/null
+++ b/python/qpid/parser.py
@@ -0,0 +1,68 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+class ParseError(Exception):
+
+ def __init__(self, token, *expected):
+ line, ln, col = token.line_info()
+ exp = ", ".join(map(str, expected))
+ if len(expected) > 1:
+ exp = "(%s)" % exp
+ if expected:
+ msg = "expecting %s, got %s line:%s,%s:%s" % (exp, token, ln, col, line)
+ else:
+ msg = "unexpected token %s line:%s,%s:%s" % (token, ln, col, line)
+ Exception.__init__(self, msg)
+ self.token = token
+ self.expected = expected
+
+class Parser:
+
+ def __init__(self, tokens):
+ self.tokens = tokens
+ self.idx = 0
+
+ def next(self):
+ return self.tokens[self.idx]
+
+ def matches(self, *types):
+ return self.next().type in types
+
+ def eat(self, *types):
+ if types and not self.matches(*types):
+ raise ParseError(self.next(), *types)
+ else:
+ t = self.next()
+ self.idx += 1
+ return t
+
+ def eat_until(self, *types):
+ result = []
+ while not self.matches(*types):
+ result.append(self.eat())
+ return result
+
+ def remove(self, start, end):
+ start_idx = self.tokens.index(start)
+ end_idx = self.tokens.index(end) + 1
+ del self.tokens[start_idx:end_idx]
+ self.idx -= end_idx - start_idx
+
+ def reset(self):
+ self.idx = 0
diff --git a/python/qpid/peer.py b/python/qpid/peer.py
index 0932efeab3..2bc9844351 100644
--- a/python/qpid/peer.py
+++ b/python/qpid/peer.py
@@ -25,7 +25,7 @@ incoming method frames to a delegate.
"""
import thread, threading, traceback, socket, sys, logging
-from connection08 import EOF, Method, Header, Body, Request, Response
+from connection08 import EOF, Method, Header, Body, Request, Response, VersionError
from message import Message
from queue import Queue, Closed as QueueClosed
from content import Content
@@ -95,6 +95,8 @@ class Peer:
break
ch = self.channel(frame.channel)
ch.receive(frame, self.work)
+ except VersionError, e:
+ self.closed(e)
except:
self.fatal()
@@ -193,11 +195,7 @@ class Channel:
self.futures = {}
self.control_queue = Queue(0)#used for incoming methods that apps may want to handle themselves
- # Use reliable framing if version == 0-9.
- if spec.major == 0 and spec.minor == 9:
- self.invoker = self.invoke_reliable
- else:
- self.invoker = self.invoke_method
+ self.invoker = self.invoke_method
self.use_execution_layer = (spec.major == 0 and spec.minor == 10) or (spec.major == 99 and spec.minor == 0)
self.synchronous = True
@@ -464,6 +462,6 @@ class IncomingCompletion:
#TODO: record and manage the ranges properly
range = [mark, mark]
if (self.mark == -1):#hack until wraparound is implemented
- self.channel.execution_complete(cumulative_execution_mark=0xFFFFFFFF, ranged_execution_set=range)
+ self.channel.execution_complete(cumulative_execution_mark=0xFFFFFFFFL, ranged_execution_set=range)
else:
self.channel.execution_complete(cumulative_execution_mark=self.mark, ranged_execution_set=range)
diff --git a/python/qpid/queue.py b/python/qpid/queue.py
index c9f4d1d1d0..63a7684843 100644
--- a/python/qpid/queue.py
+++ b/python/qpid/queue.py
@@ -63,7 +63,9 @@ class Queue(BaseQueue):
if listener is None:
if self.thread is not None:
self.put(Queue.STOP)
- self.thread.join()
+ # loop and timed join permit keyboard interrupts to work
+ while self.thread.isAlive():
+ self.thread.join(3)
self.thread = None
self.listener = listener
diff --git a/python/qpid/selector.py b/python/qpid/selector.py
new file mode 100644
index 0000000000..ca5946c3f9
--- /dev/null
+++ b/python/qpid/selector.py
@@ -0,0 +1,139 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import atexit, time
+from compat import select, set, selectable_waiter
+from threading import Thread, Lock
+
+class Acceptor:
+
+ def __init__(self, sock, handler):
+ self.sock = sock
+ self.handler = handler
+
+ def fileno(self):
+ return self.sock.fileno()
+
+ def reading(self):
+ return True
+
+ def writing(self):
+ return False
+
+ def readable(self):
+ sock, addr = self.sock.accept()
+ self.handler(sock)
+
+class Selector:
+
+ lock = Lock()
+ DEFAULT = None
+
+ @staticmethod
+ def default():
+ Selector.lock.acquire()
+ try:
+ if Selector.DEFAULT is None:
+ sel = Selector()
+ atexit.register(sel.stop)
+ sel.start()
+ Selector.DEFAULT = sel
+ return Selector.DEFAULT
+ finally:
+ Selector.lock.release()
+
+ def __init__(self):
+ self.selectables = set()
+ self.reading = set()
+ self.writing = set()
+ self.waiter = selectable_waiter()
+ self.reading.add(self.waiter)
+ self.stopped = False
+ self.thread = None
+
+ def wakeup(self):
+ self.waiter.wakeup()
+
+ def register(self, selectable):
+ self.selectables.add(selectable)
+ self.modify(selectable)
+
+ def _update(self, selectable):
+ if selectable.reading():
+ self.reading.add(selectable)
+ else:
+ self.reading.discard(selectable)
+ if selectable.writing():
+ self.writing.add(selectable)
+ else:
+ self.writing.discard(selectable)
+ return selectable.timing()
+
+ def modify(self, selectable):
+ self._update(selectable)
+ self.wakeup()
+
+ def unregister(self, selectable):
+ self.reading.discard(selectable)
+ self.writing.discard(selectable)
+ self.selectables.discard(selectable)
+ self.wakeup()
+
+ def start(self):
+ self.stopped = False
+ self.thread = Thread(target=self.run)
+ self.thread.setDaemon(True)
+ self.thread.start()
+
+ def run(self):
+ while not self.stopped:
+ wakeup = None
+ for sel in self.selectables.copy():
+ t = self._update(sel)
+ if t is not None:
+ if wakeup is None:
+ wakeup = t
+ else:
+ wakeup = min(wakeup, t)
+
+ if wakeup is None:
+ timeout = None
+ else:
+ timeout = max(0, wakeup - time.time())
+
+ rd, wr, ex = select(self.reading, self.writing, (), timeout)
+
+ for sel in wr:
+ if sel.writing():
+ sel.writeable()
+
+ for sel in rd:
+ if sel.reading():
+ sel.readable()
+
+ now = time.time()
+ for sel in self.selectables.copy():
+ w = sel.timing()
+ if w is not None and now > w:
+ sel.timeout()
+
+ def stop(self, timeout=None):
+ self.stopped = True
+ self.wakeup()
+ self.thread.join(timeout)
+ self.thread = None
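
(A minimal sketch of driving the new selector loop. The listening socket and handler are assumptions for illustration; the selectable class below implements the reading/writing/timing/readable methods the loop calls.)

import socket
from qpid.selector import Selector

class ListenSelectable:

  def __init__(self, sock, handler):
    self.sock = sock
    self.handler = handler

  def fileno(self):
    return self.sock.fileno()

  def reading(self):
    return True                 # always interested in incoming connections

  def writing(self):
    return False

  def timing(self):
    return None                 # no timed wakeup needed

  def readable(self):
    sock, addr = self.sock.accept()
    self.handler(sock)

def on_accept(sock):
  print "accepted connection from", sock.getpeername()

listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(("127.0.0.1", 0))                      # any free port
listener.listen(5)

sel = Selector.default()                             # shared selector, runs as a daemon thread
sel.register(ListenSelectable(listener, on_accept))  # readable() fires for each pending accept
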
diff --git a/python/qpid/session.py b/python/qpid/session.py
index 2f70461ab6..2f1bd81bd4 100644
--- a/python/qpid/session.py
+++ b/python/qpid/session.py
@@ -18,12 +18,13 @@
#
from threading import Condition, RLock, Lock, currentThread
-from invoker import Invoker
+from spec import SPEC
+from generator import command_invoker
from datatypes import RangedSet, Struct, Future
from codec010 import StringCodec
-from assembler import Segment
from queue import Queue
from datatypes import Message, serial
+from ops import Command, MessageTransfer
from util import wait, notify
from exceptions import *
from logging import getLogger
@@ -43,12 +44,12 @@ def server(*args):
INCOMPLETE = object()
-class Session(Invoker):
+class Session(command_invoker()):
- def __init__(self, name, spec, auto_sync=True, timeout=10, delegate=client):
+ def __init__(self, name, auto_sync=True, timeout=10, delegate=client):
self.name = name
- self.spec = spec
self.auto_sync = auto_sync
+ self.need_sync = True
self.timeout = timeout
self.channel = None
self.invoke_lock = Lock()
@@ -66,8 +67,6 @@ class Session(Invoker):
self.results = {}
self.exceptions = []
- self.assembly = None
-
self.delegate = delegate(self)
def incoming(self, destination):
@@ -94,7 +93,7 @@ class Session(Invoker):
ch = self.channel
if ch is not None and currentThread() == ch.connection.thread:
raise SessionException("deadlock detected")
- if not self.auto_sync:
+ if self.need_sync:
self.execution_sync(sync=True)
last = self.sender.next_id - 1
if not wait(self.condition, lambda:
@@ -133,82 +132,50 @@ class Session(Invoker):
finally:
self.lock.release()
- def resolve_method(self, name):
- cmd = self.spec.instructions.get(name)
- if cmd is not None and cmd.track == self.spec["track.command"].value:
- return self.METHOD, cmd
+ def invoke(self, op, args, kwargs):
+ if issubclass(op, Command):
+ self.invoke_lock.acquire()
+ try:
+ return self.do_invoke(op, args, kwargs)
+ finally:
+ self.invoke_lock.release()
else:
- # XXX
- for st in self.spec.structs.values():
- if st.name == name:
- return self.METHOD, st
- if self.spec.structs_by_name.has_key(name):
- return self.METHOD, self.spec.structs_by_name[name]
- if self.spec.enums.has_key(name):
- return self.VALUE, self.spec.enums[name]
- return self.ERROR, None
-
- def invoke(self, type, args, kwargs):
- # XXX
- if not hasattr(type, "track"):
- return type.new(args, kwargs)
-
- self.invoke_lock.acquire()
- try:
- return self.do_invoke(type, args, kwargs)
- finally:
- self.invoke_lock.release()
+ return op(*args, **kwargs)
- def do_invoke(self, type, args, kwargs):
+ def do_invoke(self, op, args, kwargs):
if self._closing:
raise SessionClosed()
- if self.channel == None:
+ ch = self.channel
+ if ch == None:
raise SessionDetached()
- if type.segments:
- if len(args) == len(type.fields) + 1:
+ if op == MessageTransfer:
+ if len(args) == len(op.FIELDS) + 1:
message = args[-1]
args = args[:-1]
else:
message = kwargs.pop("message", None)
- else:
- message = None
-
- hdr = Struct(self.spec["session.header"])
- hdr.sync = self.auto_sync or kwargs.pop("sync", False)
+ if message is not None:
+ kwargs["headers"] = message.headers
+ kwargs["payload"] = message.body
- cmd = type.new(args, kwargs)
- sc = StringCodec(self.spec)
- sc.write_command(hdr, cmd)
+ cmd = op(*args, **kwargs)
+ cmd.sync = self.auto_sync or cmd.sync
+ self.need_sync = not cmd.sync
+ cmd.channel = ch.id
- seg = Segment(True, (message == None or
- (message.headers == None and message.body == None)),
- type.segment_type, type.track, self.channel.id, sc.encoded)
-
- if type.result:
+ if op.RESULT:
result = Future(exception=SessionException)
self.results[self.sender.next_id] = result
- self.send(seg)
-
- log.debug("SENT %s %s %s", seg.id, hdr, cmd)
-
- if message != None:
- if message.headers != None:
- sc = StringCodec(self.spec)
- for st in message.headers:
- sc.write_struct32(st)
- seg = Segment(False, message.body == None, self.spec["segment_type.header"].value,
- type.track, self.channel.id, sc.encoded)
- self.send(seg)
- if message.body != None:
- seg = Segment(False, True, self.spec["segment_type.body"].value,
- type.track, self.channel.id, message.body)
- self.send(seg)
- msg.debug("SENT %s", message)
-
- if type.result:
+ self.send(cmd)
+
+ log.debug("SENT %s", cmd)
+ if op == MessageTransfer:
+ msg.debug("SENT %s", cmd)
+
+ if op.RESULT:
if self.auto_sync:
return result.get(self.timeout)
else:
@@ -216,81 +183,47 @@ class Session(Invoker):
elif self.auto_sync:
self.sync(self.timeout)
- def received(self, seg):
- self.receiver.received(seg)
- if seg.first:
- assert self.assembly == None
- self.assembly = []
- self.assembly.append(seg)
- if seg.last:
- self.dispatch(self.assembly)
- self.assembly = None
+ def received(self, cmd):
+ self.receiver.received(cmd)
+ self.dispatch(cmd)
- def dispatch(self, assembly):
- segments = assembly[:]
+ def dispatch(self, cmd):
+ log.debug("RECV %s", cmd)
- hdr, cmd = assembly.pop(0).decode(self.spec)
- log.debug("RECV %s %s %s", cmd.id, hdr, cmd)
-
- args = []
-
- for st in cmd._type.segments:
- if assembly:
- seg = assembly[0]
- if seg.type == st.segment_type:
- args.append(seg.decode(self.spec))
- assembly.pop(0)
- continue
- args.append(None)
-
- assert len(assembly) == 0
-
- attr = cmd._type.qname.replace(".", "_")
- result = getattr(self.delegate, attr)(cmd, *args)
-
- if cmd._type.result:
+ result = getattr(self.delegate, cmd.NAME)(cmd)
+ if result is INCOMPLETE:
+ return
+ elif result is not None:
self.execution_result(cmd.id, result)
- if result is not INCOMPLETE:
- for seg in segments:
- self.receiver.completed(seg)
- # XXX: don't forget to obey sync for manual completion as well
- if hdr.sync:
- self.channel.session_completed(self.receiver._completed)
+ self.receiver.completed(cmd)
+ # XXX: don't forget to obey sync for manual completion as well
+ if cmd.sync:
+ self.channel.session_completed(self.receiver._completed)
- def send(self, seg):
- self.sender.send(seg)
-
- def __str__(self):
- return '<Session: %s, %s>' % (self.name, self.channel)
+ def send(self, cmd):
+ self.sender.send(cmd)
def __repr__(self):
- return str(self)
+ return '<Session: %s, %s>' % (self.name, self.channel)
class Receiver:
def __init__(self, session):
self.session = session
self.next_id = None
- self.next_offset = None
self._completed = RangedSet()
- def received(self, seg):
- if self.next_id == None or self.next_offset == None:
+ def received(self, cmd):
+ if self.next_id == None:
raise Exception("todo")
- seg.id = self.next_id
- seg.offset = self.next_offset
- if seg.last:
- self.next_id += 1
- self.next_offset = 0
- else:
- self.next_offset += len(seg.payload)
+ cmd.id = self.next_id
+ self.next_id += 1
- def completed(self, seg):
- if seg.id == None:
- raise ValueError("cannot complete unidentified segment")
- if seg.last:
- self._completed.add(seg.id)
+ def completed(self, cmd):
+ if cmd.id == None:
+ raise ValueError("cannot complete unidentified command")
+ self._completed.add(cmd.id)
def known_completed(self, commands):
completed = RangedSet()
@@ -307,30 +240,27 @@ class Sender:
def __init__(self, session):
self.session = session
self.next_id = serial(0)
- self.next_offset = 0
- self.segments = []
+ self.commands = []
self._completed = RangedSet()
- def send(self, seg):
- seg.id = self.next_id
- seg.offset = self.next_offset
- if seg.last:
- self.next_id += 1
- self.next_offset = 0
- else:
- self.next_offset += len(seg.payload)
- self.segments.append(seg)
+ def send(self, cmd):
+ ch = self.session.channel
+ if ch is None:
+ raise SessionDetached()
+ cmd.id = self.next_id
+ self.next_id += 1
if self.session.send_id:
self.session.send_id = False
- self.session.channel.session_command_point(seg.id, seg.offset)
- self.session.channel.connection.write_segment(seg)
+ ch.session_command_point(cmd.id, 0)
+ self.commands.append(cmd)
+ ch.connection.write_op(cmd)
def completed(self, commands):
idx = 0
- while idx < len(self.segments):
- seg = self.segments[idx]
- if seg.id in commands:
- del self.segments[idx]
+ while idx < len(self.commands):
+ cmd = self.commands[idx]
+ if cmd.id in commands:
+ del self.commands[idx]
else:
idx += 1
for range in commands.ranges:
@@ -344,8 +274,9 @@ class Incoming(Queue):
self.destination = destination
def start(self):
- for unit in self.session.credit_unit.values():
- self.session.message_flow(self.destination, unit, 0xFFFFFFFF)
+ self.session.message_set_flow_mode(self.destination, self.session.flow_mode.credit)
+ for unit in self.session.credit_unit.VALUES:
+ self.session.message_flow(self.destination, unit, 0xFFFFFFFFL)
def stop(self):
self.session.message_cancel(self.destination)
@@ -368,9 +299,9 @@ class Delegate:
class Client(Delegate):
- def message_transfer(self, cmd, headers, body):
- m = Message(body)
- m.headers = headers
+ def message_transfer(self, cmd):
+ m = Message(cmd.payload)
+ m.headers = cmd.headers
m.id = cmd.id
messages = self.session.incoming(cmd.destination)
messages.put(m)
diff --git a/python/qpid/spec.py b/python/qpid/spec.py
index e6d914044c..e9bfef1fa6 100644
--- a/python/qpid/spec.py
+++ b/python/qpid/spec.py
@@ -29,7 +29,7 @@ class so that the generated code can be reused in a variety of
situations.
"""
-import os, mllib, spec08, spec010
+import os, mllib, spec08
def default():
try:
@@ -54,6 +54,8 @@ def load(specfile, *errata):
minor = doc["amqp/@minor"]
if major == "0" and minor == "10":
- return spec010.load(specfile, *errata)
+ return None
else:
return spec08.load(specfile, *errata)
+
+SPEC = load(default())
diff --git a/python/qpid/spec010.py b/python/qpid/spec010.py
deleted file mode 100644
index 23966e6176..0000000000
--- a/python/qpid/spec010.py
+++ /dev/null
@@ -1,691 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import os, cPickle, datatypes
-from codec010 import StringCodec
-from util import mtime, fill
-
-class Node:
-
- def __init__(self, children):
- self.children = children
- self.named = {}
- self.docs = []
- self.rules = []
-
- def register(self):
- for ch in self.children:
- ch.register(self)
-
- def resolve(self):
- for ch in self.children:
- ch.resolve()
-
- def __getitem__(self, name):
- path = name.split(".", 1)
- nd = self.named
- for step in path:
- nd = nd[step]
- return nd
-
- def __iter__(self):
- return iter(self.children)
-
-class Anonymous:
-
- def __init__(self, children):
- self.children = children
-
- def register(self, node):
- for ch in self.children:
- ch.register(node)
-
- def resolve(self):
- for ch in self.children:
- ch.resolve()
-
-class Named:
-
- def __init__(self, name):
- self.name = name
- self.qname = None
-
- def register(self, node):
- self.spec = node.spec
- self.klass = node.klass
- node.named[self.name] = self
- if node.qname:
- self.qname = "%s.%s" % (node.qname, self.name)
- else:
- self.qname = self.name
-
- def __str__(self):
- return self.qname
-
- def __repr__(self):
- return str(self)
-
-class Lookup:
-
- def lookup(self, name):
- value = None
- if self.klass:
- try:
- value = self.klass[name]
- except KeyError:
- pass
- if not value:
- value = self.spec[name]
- return value
-
-class Coded:
-
- def __init__(self, code):
- self.code = code
-
-class Constant(Named, Node):
-
- def __init__(self, name, value, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
- self.value = value
-
- def register(self, node):
- Named.register(self, node)
- node.constants.append(self)
- Node.register(self)
-
-class Type(Named, Node):
-
- def __init__(self, name, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
-
- def is_present(self, value):
- return value != None
-
- def register(self, node):
- Named.register(self, node)
- Node.register(self)
-
-class Primitive(Coded, Type):
-
- def __init__(self, name, code, fixed, variable, children):
- Coded.__init__(self, code)
- Type.__init__(self, name, children)
- self.fixed = fixed
- self.variable = variable
-
- def register(self, node):
- Type.register(self, node)
- if self.code is not None:
- self.spec.types[self.code] = self
-
- def is_present(self, value):
- if self.fixed == 0:
- return value
- else:
- return Type.is_present(self, value)
-
- def encode(self, codec, value):
- getattr(codec, "write_%s" % self.name)(value)
-
- def decode(self, codec):
- return getattr(codec, "read_%s" % self.name)()
-
-class Domain(Type, Lookup):
-
- def __init__(self, name, type, children):
- Type.__init__(self, name, children)
- self.type = type
- self.choices = {}
-
- def resolve(self):
- self.type = self.lookup(self.type)
- Node.resolve(self)
-
- def encode(self, codec, value):
- self.type.encode(codec, value)
-
- def decode(self, codec):
- return self.type.decode(codec)
-
-class Enum:
-
- def __init__(self, name):
- self.name = name
- self._names = ()
- self._values = ()
-
- def values(self):
- return self._values
-
- def __repr__(self):
- return "%s(%s)" % (self.name, ", ".join(self._names))
-
-class Choice(Named, Node):
-
- def __init__(self, name, value, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
- self.value = value
-
- def register(self, node):
- Named.register(self, node)
- node.choices[self.value] = self
- Node.register(self)
- try:
- enum = node.spec.enums[node.name]
- except KeyError:
- enum = Enum(node.name)
- node.spec.enums[node.name] = enum
- setattr(enum, self.name, self.value)
- enum._names += (self.name,)
- enum._values += (self.value,)
-
-class Composite(Type, Coded):
-
- def __init__(self, name, label, code, size, pack, children):
- Coded.__init__(self, code)
- Type.__init__(self, name, children)
- self.label = label
- self.fields = []
- self.size = size
- self.pack = pack
-
- def new(self, args, kwargs):
- return datatypes.Struct(self, *args, **kwargs)
-
- def decode(self, codec):
- codec.read_size(self.size)
- if self.code is not None:
- code = codec.read_uint16()
- assert self.code == code
- return datatypes.Struct(self, **self.decode_fields(codec))
-
- def decode_fields(self, codec):
- flags = 0
- for i in range(self.pack):
- flags |= (codec.read_uint8() << 8*i)
-
- result = {}
-
- for i in range(len(self.fields)):
- f = self.fields[i]
- if flags & (0x1 << i):
- result[f.name] = f.type.decode(codec)
- else:
- result[f.name] = None
- return result
-
- def encode(self, codec, value):
- sc = StringCodec(self.spec)
- if self.code is not None:
- sc.write_uint16(self.code)
- self.encode_fields(sc, value)
- codec.write_size(self.size, len(sc.encoded))
- codec.write(sc.encoded)
-
- def encode_fields(self, codec, values):
- flags = 0
- for i in range(len(self.fields)):
- f = self.fields[i]
- if f.type.is_present(values[f.name]):
- flags |= (0x1 << i)
- for i in range(self.pack):
- codec.write_uint8((flags >> 8*i) & 0xFF)
- for i in range(len(self.fields)):
- f = self.fields[i]
- if flags & (0x1 << i):
- f.type.encode(codec, values[f.name])
-
- def docstring(self):
- docs = []
- if self.label:
- docs.append(self.label)
- docs += [d.text for d in self.docs]
- s = "\n\n".join([fill(t, 2) for t in docs])
- for f in self.fields:
- fdocs = []
- if f.label:
- fdocs.append(f.label)
- else:
- fdocs.append("")
- fdocs += [d.text for d in f.docs]
- s += "\n\n" + "\n\n".join([fill(fdocs[0], 4, f.name)] +
- [fill(t, 4) for t in fdocs[1:]])
- return s
-
-
-class Field(Named, Node, Lookup):
-
- def __init__(self, name, label, type, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
- self.label = label
- self.type = type
- self.exceptions = []
-
- def default(self):
- return None
-
- def register(self, node):
- Named.register(self, node)
- node.fields.append(self)
- Node.register(self)
-
- def resolve(self):
- self.type = self.lookup(self.type)
- Node.resolve(self)
-
- def __str__(self):
- return "%s: %s" % (self.qname, self.type.qname)
-
-class Struct(Composite):
-
- def register(self, node):
- Composite.register(self, node)
- if self.code is not None:
- self.spec.structs[self.code] = self
- self.spec.structs_by_name[self.name] = self
- self.pyname = self.name
- self.pydoc = self.docstring()
-
- def __str__(self):
- fields = ",\n ".join(["%s: %s" % (f.name, f.type.qname)
- for f in self.fields])
- return "%s {\n %s\n}" % (self.qname, fields)
-
-class Segment:
-
- def __init__(self):
- self.segment_type = None
-
- def register(self, node):
- self.spec = node.spec
- self.klass = node.klass
- node.segments.append(self)
- Node.register(self)
-
-class Instruction(Composite, Segment):
-
- def __init__(self, name, label, code, children):
- Composite.__init__(self, name, label, code, 0, 2, children)
- Segment.__init__(self)
- self.track = None
- self.handlers = []
-
- def __str__(self):
- return "%s(%s)" % (self.qname, ", ".join(["%s: %s" % (f.name, f.type.qname)
- for f in self.fields]))
-
- def register(self, node):
- Composite.register(self, node)
- self.pyname = self.qname.replace(".", "_")
- self.pydoc = self.docstring()
- self.spec.instructions[self.pyname] = self
-
-class Control(Instruction):
-
- def __init__(self, name, code, label, children):
- Instruction.__init__(self, name, code, label, children)
- self.response = None
-
- def register(self, node):
- Instruction.register(self, node)
- node.controls.append(self)
- self.spec.controls[self.code] = self
- self.segment_type = self.spec["segment_type.control"].value
- self.track = self.spec["track.control"].value
-
-class Command(Instruction):
-
- def __init__(self, name, label, code, children):
- Instruction.__init__(self, name, label, code, children)
- self.result = None
- self.exceptions = []
- self.segments = []
-
- def register(self, node):
- Instruction.register(self, node)
- node.commands.append(self)
- self.spec.commands[self.code] = self
- self.segment_type = self.spec["segment_type.command"].value
- self.track = self.spec["track.command"].value
-
-class Header(Segment, Node):
-
- def __init__(self, children):
- Segment.__init__(self)
- Node.__init__(self, children)
- self.entries = []
-
- def register(self, node):
- Segment.register(self, node)
- self.segment_type = self.spec["segment_type.header"].value
- Node.register(self)
-
-class Entry(Lookup):
-
- def __init__(self, type):
- self.type = type
-
- def register(self, node):
- self.spec = node.spec
- self.klass = node.klass
- node.entries.append(self)
-
- def resolve(self):
- self.type = self.lookup(self.type)
-
-class Body(Segment, Node):
-
- def __init__(self, children):
- Segment.__init__(self)
- Node.__init__(self, children)
-
- def register(self, node):
- Segment.register(self, node)
- self.segment_type = self.spec["segment_type.body"].value
- Node.register(self)
-
- def resolve(self): pass
-
-class Class(Named, Coded, Node):
-
- def __init__(self, name, code, children):
- Named.__init__(self, name)
- Coded.__init__(self, code)
- Node.__init__(self, children)
- self.controls = []
- self.commands = []
-
- def register(self, node):
- Named.register(self, node)
- self.klass = self
- node.classes.append(self)
- Node.register(self)
-
-class Doc:
-
- def __init__(self, type, title, text):
- self.type = type
- self.title = title
- self.text = text
-
- def register(self, node):
- node.docs.append(self)
-
- def resolve(self): pass
-
-class Role(Named, Node):
-
- def __init__(self, name, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
-
- def register(self, node):
- Named.register(self, node)
- Node.register(self)
-
-class Rule(Named, Node):
-
- def __init__(self, name, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
-
- def register(self, node):
- Named.register(self, node)
- node.rules.append(self)
- Node.register(self)
-
-class Exception(Named, Node):
-
- def __init__(self, name, error_code, children):
- Named.__init__(self, name)
- Node.__init__(self, children)
- self.error_code = error_code
-
- def register(self, node):
- Named.register(self, node)
- node.exceptions.append(self)
- Node.register(self)
-
-class Spec(Node):
-
- ENCODINGS = {
- basestring: "vbin16",
- int: "int64",
- long: "int64",
- float: "float",
- None.__class__: "void",
- list: "list",
- tuple: "list",
- dict: "map"
- }
-
- def __init__(self, major, minor, port, children):
- Node.__init__(self, children)
- self.major = major
- self.minor = minor
- self.port = port
- self.constants = []
- self.classes = []
- self.types = {}
- self.qname = None
- self.spec = self
- self.klass = None
- self.instructions = {}
- self.controls = {}
- self.commands = {}
- self.structs = {}
- self.structs_by_name = {}
- self.enums = {}
-
- def encoding(self, klass):
- if Spec.ENCODINGS.has_key(klass):
- return self.named[Spec.ENCODINGS[klass]]
- for base in klass.__bases__:
- result = self.encoding(base)
- if result != None:
- return result
-
-class Implement:
-
- def __init__(self, handle):
- self.handle = handle
-
- def register(self, node):
- node.handlers.append(self.handle)
-
- def resolve(self): pass
-
-class Response(Node):
-
- def __init__(self, name, children):
- Node.__init__(self, children)
- self.name = name
-
- def register(self, node):
- Node.register(self)
-
-class Result(Node, Lookup):
-
- def __init__(self, type, children):
- self.type = type
- Node.__init__(self, children)
-
- def register(self, node):
- node.result = self
- self.qname = node.qname
- self.klass = node.klass
- self.spec = node.spec
- Node.register(self)
-
- def resolve(self):
- self.type = self.lookup(self.type)
- Node.resolve(self)
-
-import mllib
-
-def num(s):
- if s: return int(s, 0)
-
-REPLACE = {" ": "_", "-": "_"}
-KEYWORDS = {"global": "global_",
- "return": "return_"}
-
-def id(name):
- name = str(name)
- for key, val in REPLACE.items():
- name = name.replace(key, val)
- try:
- name = KEYWORDS[name]
- except KeyError:
- pass
- return name
-
-class Loader:
-
- def __init__(self):
- self.class_code = 0
-
- def code(self, nd):
- c = num(nd["@code"])
- if c is None:
- return None
- else:
- return c | (self.class_code << 8)
-
- def list(self, q):
- result = []
- for nd in q:
- result.append(nd.dispatch(self))
- return result
-
- def children(self, n):
- return self.list(n.query["#tag"])
-
- def data(self, d):
- return d.data
-
- def do_amqp(self, a):
- return Spec(num(a["@major"]), num(a["@minor"]), num(a["@port"]),
- self.children(a))
-
- def do_type(self, t):
- return Primitive(id(t["@name"]), self.code(t), num(t["@fixed-width"]),
- num(t["@variable-width"]), self.children(t))
-
- def do_constant(self, c):
- return Constant(id(c["@name"]), num(c["@value"]), self.children(c))
-
- def do_domain(self, d):
- return Domain(id(d["@name"]), id(d["@type"]), self.children(d))
-
- def do_enum(self, e):
- return Anonymous(self.children(e))
-
- def do_choice(self, c):
- return Choice(id(c["@name"]), num(c["@value"]), self.children(c))
-
- def do_class(self, c):
- code = num(c["@code"])
- self.class_code = code
- children = self.children(c)
- children += self.list(c.query["command/result/struct"])
- self.class_code = 0
- return Class(id(c["@name"]), code, children)
-
- def do_doc(self, doc):
- text = reduce(lambda x, y: x + y, self.list(doc.children))
- return Doc(doc["@type"], doc["@title"], text)
-
- def do_xref(self, x):
- return x["@ref"]
-
- def do_role(self, r):
- return Role(id(r["@name"]), self.children(r))
-
- def do_control(self, c):
- return Control(id(c["@name"]), c["@label"], self.code(c), self.children(c))
-
- def do_rule(self, r):
- return Rule(id(r["@name"]), self.children(r))
-
- def do_implement(self, i):
- return Implement(id(i["@handle"]))
-
- def do_response(self, r):
- return Response(id(r["@name"]), self.children(r))
-
- def do_field(self, f):
- return Field(id(f["@name"]), f["@label"], id(f["@type"]), self.children(f))
-
- def do_struct(self, s):
- return Struct(id(s["@name"]), s["@label"], self.code(s), num(s["@size"]),
- num(s["@pack"]), self.children(s))
-
- def do_command(self, c):
- return Command(id(c["@name"]), c["@label"], self.code(c), self.children(c))
-
- def do_segments(self, s):
- return Anonymous(self.children(s))
-
- def do_header(self, h):
- return Header(self.children(h))
-
- def do_entry(self, e):
- return Entry(id(e["@type"]))
-
- def do_body(self, b):
- return Body(self.children(b))
-
- def do_result(self, r):
- type = r["@type"]
- if not type:
- type = r["struct/@name"]
- return Result(id(type), self.list(r.query["#tag", lambda x: x.name != "struct"]))
-
- def do_exception(self, e):
- return Exception(id(e["@name"]), id(e["@error-code"]), self.children(e))
-
-def load(xml):
- fname = xml + ".pcl"
-
- if os.path.exists(fname) and mtime(fname) > mtime(__file__):
- file = open(fname, "r")
- s = cPickle.load(file)
- file.close()
- else:
- doc = mllib.xml_parse(xml)
- s = doc["amqp"].dispatch(Loader())
- s.register()
- s.resolve()
-
- try:
- file = open(fname, "w")
- except IOError:
- file = None
-
- if file:
- cPickle.dump(s, file)
- file.close()
-
- return s
diff --git a/python/qpid/testlib.py b/python/qpid/testlib.py
index b5aa59f586..1439b892ea 100644
--- a/python/qpid/testlib.py
+++ b/python/qpid/testlib.py
@@ -21,191 +21,13 @@
# Support library for qpid python tests.
#
-import sys, re, unittest, os, random, logging, traceback
-import qpid.client, qpid.spec
+import unittest, traceback, socket
+import qpid.client, qmf.console
import Queue
-from fnmatch import fnmatch
-from getopt import getopt, GetoptError
from qpid.content import Content
from qpid.message import Message
-
-#0-10 support
-from qpid.connection import Connection
-from qpid.spec010 import load
-from qpid.util import connect
-
-def findmodules(root):
- """Find potential python modules under directory root"""
- found = []
- for dirpath, subdirs, files in os.walk(root):
- modpath = dirpath.replace(os.sep, '.')
- if not re.match(r'\.svn$', dirpath): # Avoid SVN directories
- for f in files:
- match = re.match(r'(.+)\.py$', f)
- if match and f != '__init__.py':
- found.append('.'.join([modpath, match.group(1)]))
- return found
-
-def default(value, default):
- if (value == None): return default
- else: return value
-
-class TestRunner:
-
- SPEC_FOLDER = "../specs"
-
- """Runs unit tests.
-
- Parses command line arguments, provides utility functions for tests,
- runs the selected test suite.
- """
-
- def _die(self, message = None):
- if message: print message
- print """
-run-tests [options] [test*]
-The name of a test is package.module.ClassName.testMethod
-Options:
- -?/-h/--help : this message
- -s/--spec <spec.xml> : URL of AMQP XML specification or one of these abbreviations:
- 0-8 - use the default 0-8 specification.
- 0-9 - use the default 0-9 specification.
- -e/--errata <errata.xml> : file containing amqp XML errata
- -b/--broker [<user>[/<password>]@]<host>[:<port>] : broker to connect to
- -v/--verbose : verbose - lists tests as they are run.
- -d/--debug : enable debug logging.
- -i/--ignore <test> : ignore the named test.
- -I/--ignore-file : file containing patterns to ignore.
- -S/--skip-self-test : skips the client self tests in the 'tests folder'
- -F/--spec-folder : folder that contains the specs to be loaded
- """
- sys.exit(1)
-
- def setBroker(self, broker):
- rex = re.compile(r"""
- # [ <user> [ / <password> ] @] <host> [ :<port> ]
- ^ (?: ([^/]*) (?: / ([^@]*) )? @)? ([^:]+) (?: :([0-9]+))?$""", re.X)
- match = rex.match(broker)
- if not match: self._die("'%s' is not a valid broker" % (broker))
- self.user, self.password, self.host, self.port = match.groups()
- self.port = int(default(self.port, 5672))
- self.user = default(self.user, "guest")
- self.password = default(self.password, "guest")
-
- def ignoreFile(self, filename):
- f = file(filename)
- for line in f.readlines(): self.ignore.append(line.strip())
- f.close()
-
- def use08spec(self):
- "True if we are running with the old 0-8 spec."
- # NB: AMQP 0-8 identifies itself as 8-0 for historical reasons.
- return self.spec.major==8 and self.spec.minor==0
-
- def _parseargs(self, args):
- # Defaults
- self.setBroker("localhost")
- self.verbose = 1
- self.ignore = []
- self.specfile = "0-8"
- self.errata = []
- self.skip_self_test = False
-
- try:
- opts, self.tests = getopt(args, "s:e:b:h?dvSi:I:F:",
- ["help", "spec", "errata=", "server",
- "verbose", "skip-self-test", "ignore",
- "ignore-file", "spec-folder"])
- except GetoptError, e:
- self._die(str(e))
- for opt, value in opts:
- if opt in ("-?", "-h", "--help"): self._die()
- if opt in ("-s", "--spec"): self.specfile = value
- if opt in ("-e", "--errata"): self.errata.append(value)
- if opt in ("-b", "--broker"): self.setBroker(value)
- if opt in ("-v", "--verbose"): self.verbose = 2
- if opt in ("-d", "--debug"): logging.basicConfig(level=logging.DEBUG)
- if opt in ("-i", "--ignore"): self.ignore.append(value)
- if opt in ("-I", "--ignore-file"): self.ignoreFile(value)
- if opt in ("-S", "--skip-self-test"): self.skip_self_test = True
- if opt in ("-F", "--spec-folder"): TestRunner.SPEC_FOLDER = value
- # Abbreviations for default settings.
- if (self.specfile == "0-10"):
- self.spec = load(self.get_spec_file("amqp.0-10.xml"))
- elif (self.specfile == "0-10-errata"):
- self.spec = load(self.get_spec_file("amqp.0-10-qpid-errata.xml"))
- else:
- if (self.specfile == "0-8"):
- self.specfile = self.get_spec_file("amqp.0-8.xml")
- elif (self.specfile == "0-9"):
- self.specfile = self.get_spec_file("amqp.0-9.xml")
- self.errata.append(self.get_spec_file("amqp-errata.0-9.xml"))
-
- if (self.specfile == None):
- self._die("No XML specification provided")
- print "Using specification from:", self.specfile
-
- self.spec = qpid.spec.load(self.specfile, *self.errata)
-
- if len(self.tests) == 0:
- if not self.skip_self_test:
- self.tests=findmodules("tests")
- if self.use08spec():
- self.tests+=findmodules("tests_0-8")
- elif (self.spec.major == 99 and self.spec.minor == 0):
- self.tests+=findmodules("tests_0-10_preview")
- elif (self.spec.major == 0 and self.spec.minor == 10):
- self.tests+=findmodules("tests_0-10")
- else:
- self.tests+=findmodules("tests_0-9")
-
- def testSuite(self):
- class IgnoringTestSuite(unittest.TestSuite):
- def addTest(self, test):
- if isinstance(test, unittest.TestCase):
- for pattern in testrunner.ignore:
- if fnmatch(test.id(), pattern):
- return
- unittest.TestSuite.addTest(self, test)
-
- # Use our IgnoringTestSuite in the test loader.
- unittest.TestLoader.suiteClass = IgnoringTestSuite
- return unittest.defaultTestLoader.loadTestsFromNames(self.tests)
-
- def run(self, args=sys.argv[1:]):
- self._parseargs(args)
- runner = unittest.TextTestRunner(descriptions=False,
- verbosity=self.verbose)
- result = runner.run(self.testSuite())
-
- if (self.ignore):
- print "======================================="
- print "NOTE: the following tests were ignored:"
- for t in self.ignore: print t
- print "======================================="
-
- return result.wasSuccessful()
-
- def connect(self, host=None, port=None, spec=None, user=None, password=None, tune_params=None):
- """Connect to the broker, returns a qpid.client.Client"""
- host = host or self.host
- port = port or self.port
- spec = spec or self.spec
- user = user or self.user
- password = password or self.password
- client = qpid.client.Client(host, port, spec)
- if self.use08spec():
- client.start({"LOGIN": user, "PASSWORD": password}, tune_params=tune_params)
- else:
- client.start("\x00" + user + "\x00" + password, mechanism="PLAIN", tune_params=tune_params)
- return client
-
- def get_spec_file(self, fname):
- return TestRunner.SPEC_FOLDER + os.sep + fname
-
-# Global instance for tests to call connect.
-testrunner = TestRunner()
-
+from qpid.harness import Skipped
+from qpid.exceptions import VersionError
class TestBase(unittest.TestCase):
"""Base class for Qpid test cases.
@@ -219,13 +41,16 @@ class TestBase(unittest.TestCase):
resources to clean up later.
"""
+ def configure(self, config):
+ self.config = config
+
def setUp(self):
self.queues = []
self.exchanges = []
self.client = self.connect()
self.channel = self.client.channel(1)
self.version = (self.client.spec.major, self.client.spec.minor)
- if self.version == (8, 0):
+ if self.version == (8, 0) or self.version == (0, 9):
self.channel.channel_open()
else:
self.channel.session_open()
@@ -245,9 +70,26 @@ class TestBase(unittest.TestCase):
else:
self.client.close()
- def connect(self, *args, **keys):
+ def connect(self, host=None, port=None, user=None, password=None, tune_params=None):
"""Create a new connction, return the Client object"""
- return testrunner.connect(*args, **keys)
+ host = host or self.config.broker.host
+ port = port or self.config.broker.port or 5672
+ user = user or "guest"
+ password = password or "guest"
+ client = qpid.client.Client(host, port)
+ try:
+ if client.spec.major == 8 and client.spec.minor == 0:
+ client.start({"LOGIN": user, "PASSWORD": password}, tune_params=tune_params)
+ else:
+ client.start("\x00" + user + "\x00" + password, mechanism="PLAIN", tune_params=tune_params)
+ except qpid.client.Closed, e:
+ if isinstance(e.args[0], VersionError):
+ raise Skipped(e.args[0])
+ else:
+ raise e
+ except socket.error, e:
+ raise Skipped(e)
+ return client
def queue_declare(self, channel=None, *args, **keys):
channel = channel or self.channel
@@ -271,24 +113,15 @@ class TestBase(unittest.TestCase):
def consume(self, queueName):
"""Consume from named queue returns the Queue object."""
- if testrunner.use08spec():
- reply = self.channel.basic_consume(queue=queueName, no_ack=True)
- return self.client.queue(reply.consumer_tag)
- else:
- if not "uniqueTag" in dir(self): self.uniqueTag = 1
- else: self.uniqueTag += 1
- consumer_tag = "tag" + str(self.uniqueTag)
- self.channel.message_subscribe(queue=queueName, destination=consumer_tag)
- self.channel.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFF)
- self.channel.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFF)
- return self.client.queue(consumer_tag)
+ reply = self.channel.basic_consume(queue=queueName, no_ack=True)
+ return self.client.queue(reply.consumer_tag)
def subscribe(self, channel=None, **keys):
channel = channel or self.channel
consumer_tag = keys["destination"]
channel.message_subscribe(**keys)
- channel.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFF)
- channel.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFF)
+ channel.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFFL)
+ channel.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFFL)
def assertEmpty(self, queue):
"""Assert that the queue is empty"""
@@ -302,24 +135,14 @@ class TestBase(unittest.TestCase):
Publish to exchange and assert queue.get() returns the same message.
"""
body = self.uniqueString()
- if testrunner.use08spec():
- self.channel.basic_publish(
- exchange=exchange,
- content=Content(body, properties=properties),
- routing_key=routing_key)
- else:
- self.channel.message_transfer(
- destination=exchange,
- content=Content(body, properties={'application_headers':properties,'routing_key':routing_key}))
+ self.channel.basic_publish(
+ exchange=exchange,
+ content=Content(body, properties=properties),
+ routing_key=routing_key)
msg = queue.get(timeout=1)
- if testrunner.use08spec():
- self.assertEqual(body, msg.content.body)
- if (properties):
- self.assertEqual(properties, msg.content.properties)
- else:
- self.assertEqual(body, msg.content.body)
- if (properties):
- self.assertEqual(properties, msg.content['application_headers'])
+ self.assertEqual(body, msg.content.body)
+ if (properties):
+ self.assertEqual(properties, msg.content.properties)
def assertPublishConsume(self, queue="", exchange="", routing_key="", properties=None):
"""
@@ -329,7 +152,7 @@ class TestBase(unittest.TestCase):
self.assertPublishGet(self.consume(queue), exchange, routing_key, properties)
def assertChannelException(self, expectedCode, message):
- if self.version == (8, 0): #or "transitional" in self.client.spec.file:
+ if self.version == (8, 0) or self.version == (0, 9):
if not isinstance(message, Message): self.fail("expected channel_close method, got %s" % (message))
self.assertEqual("channel", message.method.klass.name)
self.assertEqual("close", message.method.name)
@@ -346,31 +169,58 @@ class TestBase(unittest.TestCase):
self.assertEqual("close", message.method.name)
self.assertEqual(expectedCode, message.reply_code)
+#0-10 support
+from qpid.connection import Connection
+from qpid.util import connect, ssl, URL
+
class TestBase010(unittest.TestCase):
"""
  Base class for Qpid test cases using the final 0-10 spec.
"""
+ def configure(self, config):
+ self.config = config
+ self.broker = config.broker
+ self.defines = self.config.defines
+
def setUp(self):
- spec = testrunner.spec
- self.conn = Connection(connect(testrunner.host, testrunner.port), spec,
- username=testrunner.user, password=testrunner.password)
- self.conn.start(timeout=10)
+ self.conn = self.connect()
self.session = self.conn.session("test-session", timeout=10)
+ self.qmf = None
+
+ def startQmf(self, handler=None):
+ self.qmf = qmf.console.Session(handler)
+ self.qmf_broker = self.qmf.addBroker(str(self.broker))
def connect(self, host=None, port=None):
- spec = testrunner.spec
- conn = Connection(connect(host or testrunner.host, port or testrunner.port), spec)
- conn.start(timeout=10)
+ url = self.broker
+ if url.scheme == URL.AMQPS:
+ default_port = 5671
+ else:
+ default_port = 5672
+ try:
+ sock = connect(host or url.host, port or url.port or default_port)
+ except socket.error, e:
+ raise Skipped(e)
+ if url.scheme == URL.AMQPS:
+ sock = ssl(sock)
+ conn = Connection(sock, username=url.user or "guest",
+ password=url.password or "guest")
+ try:
+ conn.start(timeout=10)
+ except VersionError, e:
+ raise Skipped(e)
return conn
def tearDown(self):
if not self.session.error(): self.session.close(timeout=10)
self.conn.close(timeout=10)
+ if self.qmf:
+ self.qmf.delBroker(self.qmf_broker)
def subscribe(self, session=None, **keys):
session = session or self.session
consumer_tag = keys["destination"]
session.message_subscribe(**keys)
- session.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFF)
- session.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFF)
+ session.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFFL)
+ session.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFFL)
diff --git a/python/run-tests b/python/qpid/tests/__init__.py
index 84b76ebfc1..2f0fcfdf67 100755..100644
--- a/python/run-tests
+++ b/python/qpid/tests/__init__.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -18,18 +17,12 @@
# under the License.
#
-import sys, logging
-from qpid.testlib import testrunner
-from qpid.log import enable, WARN, DEBUG
-
-if "-vv" in sys.argv:
- level = DEBUG
-else:
- level = WARN
-
-enable("qpid", level)
-
-if not testrunner.run(): sys.exit(1)
+class Test:
+ def __init__(self, name):
+ self.name = name
+ def configure(self, config):
+ self.config = config
+import address, framing, mimetype, messaging
diff --git a/python/qpid/tests/address.py b/python/qpid/tests/address.py
new file mode 100644
index 0000000000..7c101eee5e
--- /dev/null
+++ b/python/qpid/tests/address.py
@@ -0,0 +1,199 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests import Test
+from qpid.address import lex, parse, ParseError, EOF, ID, NUMBER, SYM, WSPACE
+from parser import ParserBase
+
+class AddressTests(ParserBase, Test):
+
+ EXCLUDE = (WSPACE, EOF)
+
+ def do_lex(self, st):
+ return lex(st)
+
+ def do_parse(self, st):
+ return parse(st)
+
+ def valid(self, addr, name=None, subject=None, options=None):
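+    # Assert that addr parses to the expected (name, subject, options) triple.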
+ ParserBase.valid(self, addr, (name, subject, options))
+
+ def testDashInId1(self):
+ self.lex("foo-bar", ID)
+
+ def testDashInId2(self):
+ self.lex("foo-3", ID)
+
+ def testDashAlone1(self):
+ self.lex("foo - bar", ID, SYM, ID)
+
+ def testDashAlone2(self):
+ self.lex("foo - 3", ID, SYM, NUMBER)
+
+ def testLeadingDash(self):
+ self.lex("-foo", SYM, ID)
+
+ def testTrailingDash(self):
+ self.lex("foo-", ID, SYM)
+
+ def testNegativeNum(self):
+ self.lex("-3", NUMBER)
+
+ def testHash(self):
+ self.valid("foo/bar.#", "foo", "bar.#")
+
+ def testStar(self):
+ self.valid("foo/bar.*", "foo", "bar.*")
+
+ def testColon(self):
+ self.valid("foo.bar/baz.qux:moo:arf", "foo.bar", "baz.qux:moo:arf")
+
+ def testOptions(self):
+ self.valid("foo.bar/baz.qux:moo:arf; {key: value}",
+ "foo.bar", "baz.qux:moo:arf", {"key": "value"})
+
+ def testOptionsTrailingComma(self):
+ self.valid("name/subject; {key: value,}", "name", "subject",
+ {"key": "value"})
+
+ def testSemiSubject(self):
+ self.valid("foo.bar/'baz.qux;moo:arf'; {key: value}",
+ "foo.bar", "baz.qux;moo:arf", {"key": "value"})
+
+ def testCommaSubject(self):
+ self.valid("foo.bar/baz.qux.{moo,arf}", "foo.bar", "baz.qux.{moo,arf}")
+
+ def testCommaSubjectOptions(self):
+ self.valid("foo.bar/baz.qux.{moo,arf}; {key: value}", "foo.bar",
+ "baz.qux.{moo,arf}", {"key": "value"})
+
+ def testUnbalanced(self):
+ self.valid("foo.bar/baz.qux.{moo,arf; {key: value}", "foo.bar",
+ "baz.qux.{moo,arf", {"key": "value"})
+
+ def testSlashQuote(self):
+ self.valid("foo.bar\\/baz.qux.{moo,arf; {key: value}",
+ "foo.bar/baz.qux.{moo,arf",
+ None, {"key": "value"})
+
+ def testSlashHexEsc1(self):
+ self.valid("foo.bar\\x00baz.qux.{moo,arf; {key: value}",
+ "foo.bar\x00baz.qux.{moo,arf",
+ None, {"key": "value"})
+
+ def testSlashHexEsc2(self):
+ self.valid("foo.bar\\xffbaz.qux.{moo,arf; {key: value}",
+ "foo.bar\xffbaz.qux.{moo,arf",
+ None, {"key": "value"})
+
+ def testSlashHexEsc3(self):
+ self.valid("foo.bar\\xFFbaz.qux.{moo,arf; {key: value}",
+ "foo.bar\xFFbaz.qux.{moo,arf",
+ None, {"key": "value"})
+
+ def testSlashUnicode1(self):
+ self.valid("foo.bar\\u1234baz.qux.{moo,arf; {key: value}",
+ u"foo.bar\u1234baz.qux.{moo,arf", None, {"key": "value"})
+
+ def testSlashUnicode2(self):
+ self.valid("foo.bar\\u0000baz.qux.{moo,arf; {key: value}",
+ u"foo.bar\u0000baz.qux.{moo,arf", None, {"key": "value"})
+
+ def testSlashUnicode3(self):
+ self.valid("foo.bar\\uffffbaz.qux.{moo,arf; {key: value}",
+ u"foo.bar\uffffbaz.qux.{moo,arf", None, {"key": "value"})
+
+ def testSlashUnicode4(self):
+ self.valid("foo.bar\\uFFFFbaz.qux.{moo,arf; {key: value}",
+ u"foo.bar\uFFFFbaz.qux.{moo,arf", None, {"key": "value"})
+
+ def testNoName(self):
+ self.invalid("; {key: value}",
+ "unexpected token SEMI(';') line:1,0:; {key: value}")
+
+ def testEmpty(self):
+ self.invalid("", "unexpected token EOF line:1,0:")
+
+ def testNoNameSlash(self):
+ self.invalid("/asdf; {key: value}",
+ "unexpected token SLASH('/') line:1,0:/asdf; {key: value}")
+
+ def testBadOptions1(self):
+ self.invalid("name/subject; {",
+ "expecting (ID, RBRACE), got EOF line:1,15:name/subject; {")
+
+ def testBadOptions2(self):
+ self.invalid("name/subject; { 3",
+ "expecting (ID, RBRACE), got NUMBER('3') "
+ "line:1,16:name/subject; { 3")
+
+ def testBadOptions3(self):
+ self.invalid("name/subject; { key:",
+ "expecting (NUMBER, STRING, ID, LBRACE, LBRACK), got EOF "
+ "line:1,20:name/subject; { key:")
+
+ def testBadOptions4(self):
+ self.invalid("name/subject; { key: value",
+ "expecting (COMMA, RBRACE), got EOF "
+ "line:1,26:name/subject; { key: value")
+
+ def testBadOptions5(self):
+ self.invalid("name/subject; { key: value asdf",
+ "expecting (COMMA, RBRACE), got ID('asdf') "
+ "line:1,27:name/subject; { key: value asdf")
+
+ def testBadOptions6(self):
+ self.invalid("name/subject; { key: value,",
+ "expecting (ID, RBRACE), got EOF "
+ "line:1,27:name/subject; { key: value,")
+
+ def testBadOptions7(self):
+ self.invalid("name/subject; { key: value } asdf",
+ "expecting EOF, got ID('asdf') "
+ "line:1,29:name/subject; { key: value } asdf")
+
+ def testList1(self):
+ self.valid("name/subject; { key: [] }", "name", "subject", {"key": []})
+
+ def testList2(self):
+ self.valid("name/subject; { key: ['one'] }", "name", "subject", {"key": ['one']})
+
+ def testList3(self):
+ self.valid("name/subject; { key: [1, 2, 3] }", "name", "subject",
+ {"key": [1, 2, 3]})
+
+ def testList4(self):
+ self.valid("name/subject; { key: [1, [2, 3], 4] }", "name", "subject",
+ {"key": [1, [2, 3], 4]})
+
+ def testBadList1(self):
+ self.invalid("name/subject; { key: [ }", "expecting (NUMBER, STRING, ID, LBRACE, LBRACK), "
+ "got RBRACE('}') line:1,23:name/subject; { key: [ }")
+
+ def testBadList2(self):
+ self.invalid("name/subject; { key: [ 1 }", "expecting (COMMA, RBRACK), "
+ "got RBRACE('}') line:1,25:name/subject; { key: [ 1 }")
+
+ def testBadList3(self):
+ self.invalid("name/subject; { key: [ 1 2 }", "expecting (COMMA, RBRACK), "
+ "got NUMBER('2') line:1,25:name/subject; { key: [ 1 2 }")
+
+ def testBadList4(self):
+ self.invalid("name/subject; { key: [ 1 2 ] }", "expecting (COMMA, RBRACK), "
+ "got NUMBER('2') line:1,25:name/subject; { key: [ 1 2 ] }")
diff --git a/python/qpid/tests/framing.py b/python/qpid/tests/framing.py
new file mode 100644
index 0000000000..0b33df8b9a
--- /dev/null
+++ b/python/qpid/tests/framing.py
@@ -0,0 +1,289 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# setup, usage, teardown, errors(sync), errors(async), stress, soak,
+# boundary-conditions, config
+
+from qpid.tests import Test
+from qpid.framing import *
+
+class Base(Test):
+
+ def cmp_frames(self, frm1, frm2):
+ assert frm1.flags == frm2.flags, "expected: %r, got %r" % (frm1, frm2)
+ assert frm1.type == frm2.type, "expected: %r, got %r" % (frm1, frm2)
+ assert frm1.track == frm2.track, "expected: %r, got %r" % (frm1, frm2)
+ assert frm1.channel == frm2.channel, "expected: %r, got %r" % (frm1, frm2)
+ assert frm1.payload == frm2.payload, "expected: %r, got %r" % (frm1, frm2)
+
+ def cmp_segments(self, seg1, seg2):
+ assert seg1.first == seg2.first, "expected: %r, got %r" % (seg1, seg2)
+ assert seg1.last == seg2.last, "expected: %r, got %r" % (seg1, seg2)
+ assert seg1.type == seg2.type, "expected: %r, got %r" % (seg1, seg2)
+ assert seg1.track == seg2.track, "expected: %r, got %r" % (seg1, seg2)
+ assert seg1.channel == seg2.channel, "expected: %r, got %r" % (seg1, seg2)
+ assert seg1.payload == seg2.payload, "expected: %r, got %r" % (seg1, seg2)
+
+ def cmp_list(self, l1, l2):
+ if l1 is None:
+ assert l2 is None
+ return
+
+ assert len(l1) == len(l2)
+ for v1, v2 in zip(l1, l2):
+ if isinstance(v1, Compound):
+ self.cmp_ops(v1, v2)
+ else:
+ assert v1 == v2
+
+ def cmp_ops(self, op1, op2):
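+    # Field-by-field comparison: compound and struct32 fields are compared
+    # recursively, list/array fields element-wise, and everything else by
+    # simple equality; channel is also checked for commands and controls,
+    # sync and headers for commands.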
+ if op1 is None:
+ assert op2 is None
+ return
+
+ assert op1.__class__ == op2.__class__
+ cls = op1.__class__
+ assert op1.NAME == op2.NAME
+ assert op1.CODE == op2.CODE
+ assert op1.FIELDS == op2.FIELDS
+ for f in cls.FIELDS:
+ v1 = getattr(op1, f.name)
+ v2 = getattr(op2, f.name)
+ if COMPOUND.has_key(f.type) or f.type == "struct32":
+ self.cmp_ops(v1, v2)
+ elif f.type in ("list", "array"):
+ self.cmp_list(v1, v2)
+ else:
+ assert v1 == v2, "expected: %r, got %r" % (v1, v2)
+
+ if issubclass(cls, Command) or issubclass(cls, Control):
+ assert op1.channel == op2.channel
+
+ if issubclass(cls, Command):
+ assert op1.sync == op2.sync, "expected: %r, got %r" % (op1.sync, op2.sync)
+ assert (op1.headers is None and op2.headers is None) or \
+ (op1.headers is not None and op2.headers is not None)
+ if op1.headers is not None:
+ assert len(op1.headers) == len(op2.headers)
+ for h1, h2 in zip(op1.headers, op2.headers):
+ self.cmp_ops(h1, h2)
+
+class FrameTest(Base):
+
+ def enc_dec(self, frames, encoded=None):
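+    # Round-trip helper: encode the frames, optionally check the exact wire
+    # bytes, then decode and verify each frame survives unchanged.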
+ enc = FrameEncoder()
+ dec = FrameDecoder()
+
+ enc.write(*frames)
+ bytes = enc.read()
+ if encoded is not None:
+ assert bytes == encoded, "expected %r, got %r" % (encoded, bytes)
+ dec.write(bytes)
+ dframes = dec.read()
+
+ assert len(frames) == len(dframes)
+ for f, df, in zip(frames, dframes):
+ self.cmp_frames(f, df)
+
+ def testEmpty(self):
+ self.enc_dec([Frame(0, 0, 0, 0, "")],
+ "\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00")
+
+ def testSingle(self):
+ self.enc_dec([Frame(0, 0, 0, 1, "payload")],
+ "\x00\x00\x00\x13\x00\x00\x00\x01\x00\x00\x00\x00payload")
+
+ def testMaxChannel(self):
+ self.enc_dec([Frame(0, 0, 0, 65535, "max-channel")],
+ "\x00\x00\x00\x17\x00\x00\xff\xff\x00\x00\x00\x00max-channel")
+
+ def testMaxType(self):
+ self.enc_dec([Frame(0, 255, 0, 0, "max-type")],
+ "\x00\xff\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00max-type")
+
+ def testMaxTrack(self):
+ self.enc_dec([Frame(0, 0, 15, 0, "max-track")],
+ "\x00\x00\x00\x15\x00\x0f\x00\x00\x00\x00\x00\x00max-track")
+
+ def testSequence(self):
+ self.enc_dec([Frame(0, 0, 0, 0, "zero"),
+ Frame(0, 0, 0, 1, "one"),
+ Frame(0, 0, 1, 0, "two"),
+ Frame(0, 0, 1, 1, "three"),
+ Frame(0, 1, 0, 0, "four"),
+ Frame(0, 1, 0, 1, "five"),
+ Frame(0, 1, 1, 0, "six"),
+ Frame(0, 1, 1, 1, "seven"),
+ Frame(1, 0, 0, 0, "eight"),
+ Frame(1, 0, 0, 1, "nine"),
+ Frame(1, 0, 1, 0, "ten"),
+ Frame(1, 0, 1, 1, "eleven"),
+ Frame(1, 1, 0, 0, "twelve"),
+ Frame(1, 1, 0, 1, "thirteen"),
+ Frame(1, 1, 1, 0, "fourteen"),
+ Frame(1, 1, 1, 1, "fifteen")])
+
+class SegmentTest(Base):
+
+ def enc_dec(self, segments, frames=None, interleave=None, max_payload=Frame.MAX_PAYLOAD):
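+    # Round-trip helper: encode the segments into frames (checking against
+    # the expected frames if given), optionally interleave frames from a
+    # different channel/track to exercise the decoder, then decode and
+    # compare against the original segments.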
+ enc = SegmentEncoder(max_payload)
+ dec = SegmentDecoder()
+
+ enc.write(*segments)
+ frms = enc.read()
+ if frames is not None:
+ assert len(frames) == len(frms), "expected %s, got %s" % (frames, frms)
+ for f1, f2 in zip(frames, frms):
+ self.cmp_frames(f1, f2)
+ if interleave is not None:
+ ilvd = []
+ for f in frms:
+ ilvd.append(f)
+ if interleave:
+ ilvd.append(interleave.pop(0))
+ ilvd.extend(interleave)
+ dec.write(*ilvd)
+ else:
+ dec.write(*frms)
+ segs = dec.read()
+ assert len(segments) == len(segs)
+ for s1, s2 in zip(segments, segs):
+ self.cmp_segments(s1, s2)
+
+ def testEmpty(self):
+ self.enc_dec([Segment(True, True, 0, 0, 0, "")],
+ [Frame(FIRST_FRM | LAST_FRM | FIRST_SEG | LAST_SEG, 0, 0, 0,
+ "")])
+
+ def testSingle(self):
+ self.enc_dec([Segment(True, True, 0, 0, 0, "payload")],
+ [Frame(FIRST_FRM | LAST_FRM | FIRST_SEG | LAST_SEG, 0, 0, 0,
+ "payload")])
+
+ def testMaxChannel(self):
+ self.enc_dec([Segment(False, False, 0, 0, 65535, "max-channel")],
+ [Frame(FIRST_FRM | LAST_FRM, 0, 0, 65535, "max-channel")])
+
+ def testMaxType(self):
+ self.enc_dec([Segment(False, False, 255, 0, 0, "max-type")],
+ [Frame(FIRST_FRM | LAST_FRM, 255, 0, 0, "max-type")])
+
+ def testMaxTrack(self):
+ self.enc_dec([Segment(False, False, 0, 15, 0, "max-track")],
+ [Frame(FIRST_FRM | LAST_FRM, 0, 15, 0, "max-track")])
+
+ def testSequence(self):
+ self.enc_dec([Segment(True, False, 0, 0, 0, "one"),
+ Segment(False, False, 0, 0, 0, "two"),
+ Segment(False, True, 0, 0, 0, "three")],
+ [Frame(FIRST_FRM | LAST_FRM | FIRST_SEG, 0, 0, 0, "one"),
+ Frame(FIRST_FRM | LAST_FRM, 0, 0, 0, "two"),
+ Frame(FIRST_FRM | LAST_FRM | LAST_SEG, 0, 0, 0, "three")])
+
+ def testInterleaveChannel(self):
+ frames = [Frame(0, 0, 0, 0, chr(ord("a") + i)) for i in range(7)]
+ frames[0].flags |= FIRST_FRM
+ frames[-1].flags |= LAST_FRM
+
+ ilvd = [Frame(0, 0, 0, 1, chr(ord("a") + i)) for i in range(7)]
+
+ self.enc_dec([Segment(False, False, 0, 0, 0, "abcdefg")], frames, ilvd, max_payload=1)
+
+ def testInterleaveTrack(self):
+ frames = [Frame(0, 0, 0, 0, "%c%c" % (ord("a") + i, ord("a") + i + 1))
+ for i in range(0, 8, 2)]
+ frames[0].flags |= FIRST_FRM
+ frames[-1].flags |= LAST_FRM
+
+ ilvd = [Frame(0, 0, 1, 0, "%c%c" % (ord("a") + i, ord("a") + i + 1))
+ for i in range(0, 8, 2)]
+
+ self.enc_dec([Segment(False, False, 0, 0, 0, "abcdefgh")], frames, ilvd, max_payload=2)
+
+from qpid.ops import *
+
+class OpTest(Base):
+
+ def enc_dec(self, ops):
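+    # Round-trip helper: encode the ops into segments, decode them back and
+    # compare against the originals.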
+ enc = OpEncoder()
+ dec = OpDecoder()
+ enc.write(*ops)
+ segs = enc.read()
+ dec.write(*segs)
+ dops = dec.read()
+ assert len(ops) == len(dops)
+ for op1, op2 in zip(ops, dops):
+ self.cmp_ops(op1, op2)
+
+  def testEmptyMT(self):
+ self.enc_dec([MessageTransfer()])
+
+ def testEmptyMTSync(self):
+ self.enc_dec([MessageTransfer(sync=True)])
+
+ def testMT(self):
+ self.enc_dec([MessageTransfer(destination="asdf")])
+
+ def testSyncMT(self):
+ self.enc_dec([MessageTransfer(destination="asdf", sync=True)])
+
+ def testEmptyPayloadMT(self):
+ self.enc_dec([MessageTransfer(payload="")])
+
+ def testPayloadMT(self):
+ self.enc_dec([MessageTransfer(payload="test payload")])
+
+ def testHeadersEmptyPayloadMT(self):
+ self.enc_dec([MessageTransfer(headers=[DeliveryProperties()])])
+
+ def testHeadersPayloadMT(self):
+ self.enc_dec([MessageTransfer(headers=[DeliveryProperties()], payload="test payload")])
+
+ def testMultiHeadersEmptyPayloadMT(self):
+ self.enc_dec([MessageTransfer(headers=[DeliveryProperties(), MessageProperties()])])
+
+ def testMultiHeadersPayloadMT(self):
+ self.enc_dec([MessageTransfer(headers=[MessageProperties(), DeliveryProperties()], payload="test payload")])
+
+ def testContentTypeHeadersPayloadMT(self):
+ self.enc_dec([MessageTransfer(headers=[MessageProperties(content_type="text/plain")], payload="test payload")])
+
+ def testMulti(self):
+ self.enc_dec([MessageTransfer(),
+ MessageTransfer(sync=True),
+ MessageTransfer(destination="one"),
+ MessageTransfer(destination="two", sync=True),
+ MessageTransfer(destination="three", payload="test payload")])
+
+ def testControl(self):
+ self.enc_dec([SessionAttach(name="asdf")])
+
+ def testMixed(self):
+ self.enc_dec([SessionAttach(name="fdsa"), MessageTransfer(destination="test")])
+
+ def testChannel(self):
+ self.enc_dec([SessionAttach(name="asdf", channel=3), MessageTransfer(destination="test", channel=1)])
+
+ def testCompound(self):
+ self.enc_dec([MessageTransfer(headers=[MessageProperties(reply_to=ReplyTo(exchange="exch", routing_key="rk"))])])
+
+ def testListCompound(self):
+ self.enc_dec([ExecutionResult(value=RecoverResult(in_doubt=[Xid(global_id="one"),
+ Xid(global_id="two"),
+ Xid(global_id="three")]))])
diff --git a/python/qpid/tests/messaging.py b/python/qpid/tests/messaging.py
new file mode 100644
index 0000000000..f2a270192e
--- /dev/null
+++ b/python/qpid/tests/messaging.py
@@ -0,0 +1,929 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# setup, usage, teardown, errors(sync), errors(async), stress, soak,
+# boundary-conditions, config
+
+import time
+from qpid import compat
+from qpid.tests import Test
+from qpid.harness import Skipped
+from qpid.messaging import Connection, ConnectError, Disconnected, Empty, \
+ InsufficientCapacity, Message, ReceiveError, SendError, SessionError, \
+ UNLIMITED, uuid4
+from Queue import Queue, Empty as QueueEmpty
+
+class Base(Test):
+
+ def setup_connection(self):
+ return None
+
+ def setup_session(self):
+ return None
+
+ def setup_sender(self):
+ return None
+
+ def setup_receiver(self):
+ return None
+
+ def setup(self):
+ self.test_id = uuid4()
+ self.broker = self.config.broker
+ try:
+ self.conn = self.setup_connection()
+ except ConnectError, e:
+ raise Skipped(e)
+ self.ssn = self.setup_session()
+ self.snd = self.setup_sender()
+ if self.snd is not None:
+ self.snd.durable = self.durable()
+ self.rcv = self.setup_receiver()
+
+ def teardown(self):
+ if self.conn is not None and self.conn.connected():
+ self.conn.close()
+
+ def content(self, base, count = None):
+ if count is None:
+ return "%s[%s]" % (base, self.test_id)
+ else:
+ return "%s[%s, %s]" % (base, count, self.test_id)
+
+ def ping(self, ssn):
+ PING_Q = 'ping-queue; {create: always, delete: always}'
+ # send a message
+ sender = ssn.sender(PING_Q, durable=self.durable())
+ content = self.content("ping")
+ sender.send(content)
+ receiver = ssn.receiver(PING_Q)
+ msg = receiver.fetch(0)
+ ssn.acknowledge()
+ assert msg.content == content, "expected %r, got %r" % (content, msg.content)
+
+ def drain(self, rcv, limit=None, timeout=0, expected=None):
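+    # Fetch messages until the receiver is empty (or limit is reached) and
+    # optionally check the drained contents against the expected list.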
+ contents = []
+ try:
+ while limit is None or len(contents) < limit:
+ contents.append(rcv.fetch(timeout=timeout).content)
+ except Empty:
+ pass
+ if expected is not None:
+ assert expected == contents, "expected %s, got %s" % (expected, contents)
+ return contents
+
+ def assertEmpty(self, rcv):
+ contents = self.drain(rcv)
+ assert len(contents) == 0, "%s is supposed to be empty: %s" % (rcv, contents)
+
+ def assertPending(self, rcv, expected):
+ p = rcv.pending()
+ assert p == expected, "expected %s, got %s" % (expected, p)
+
+ def sleep(self):
+ time.sleep(self.delay())
+
+ def delay(self):
+ return float(self.config.defines.get("delay", "2"))
+
+ def get_bool(self, name):
+ return self.config.defines.get(name, "false").lower() in ("true", "yes", "1")
+
+ def durable(self):
+ return self.get_bool("durable")
+
+ def reconnect(self):
+ return self.get_bool("reconnect")
+
+class SetupTests(Base):
+
+ def testOpen(self):
+ # XXX: need to flesh out URL support/syntax
+ self.conn = Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+ self.ping(self.conn.session())
+
+ def testConnect(self):
+ # XXX: need to flesh out URL support/syntax
+ self.conn = Connection(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+ self.conn.connect()
+ self.ping(self.conn.session())
+
+ def testConnectError(self):
+ try:
+ self.conn = Connection.open("localhost", 0)
+ assert False, "connect succeeded"
+ except ConnectError, e:
+ # XXX: should verify that e includes appropriate diagnostic info
+ pass
+
+class ConnectionTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def testSessionAnon(self):
+ ssn1 = self.conn.session()
+ ssn2 = self.conn.session()
+ self.ping(ssn1)
+ self.ping(ssn2)
+ assert ssn1 is not ssn2
+
+ def testSessionNamed(self):
+ ssn1 = self.conn.session("one")
+ ssn2 = self.conn.session("two")
+ self.ping(ssn1)
+ self.ping(ssn2)
+ assert ssn1 is not ssn2
+ assert ssn1 is self.conn.session("one")
+ assert ssn2 is self.conn.session("two")
+
+ def testDisconnect(self):
+ ssn = self.conn.session()
+ self.ping(ssn)
+ self.conn.disconnect()
+ try:
+ self.ping(ssn)
+ assert False, "ping succeeded"
+ except Disconnected:
+ # this is the expected failure when pinging on a disconnected
+ # connection
+ pass
+ self.conn.connect()
+ self.ping(ssn)
+
+ def testClose(self):
+ self.conn.close()
+ assert not self.conn.connected()
+
+ACK_QC = 'test-ack-queue; {create: always}'
+ACK_QD = 'test-ack-queue; {delete: always}'
+
+class SessionTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def testSender(self):
+ snd = self.ssn.sender('test-snd-queue; {create: sender, delete: receiver}',
+ durable=self.durable())
+ snd2 = self.ssn.sender(snd.target, durable=self.durable())
+ assert snd is not snd2
+ snd2.close()
+
+ content = self.content("testSender")
+ snd.send(content)
+ rcv = self.ssn.receiver(snd.target)
+ msg = rcv.fetch(0)
+ assert msg.content == content
+ self.ssn.acknowledge(msg)
+
+ def testReceiver(self):
+ rcv = self.ssn.receiver('test-rcv-queue; {create: always}')
+ rcv2 = self.ssn.receiver(rcv.source)
+ assert rcv is not rcv2
+ rcv2.close()
+
+ content = self.content("testReceiver")
+ snd = self.ssn.sender(rcv.source, durable=self.durable())
+ snd.send(content)
+ msg = rcv.fetch(0)
+ assert msg.content == content
+ self.ssn.acknowledge(msg)
+ snd2 = self.ssn.receiver('test-rcv-queue; {delete: always}')
+
+ def testNextReceiver(self):
+ ADDR = 'test-next-rcv-queue; {create: always, delete: always}'
+ rcv1 = self.ssn.receiver(ADDR, capacity=UNLIMITED)
+ rcv2 = self.ssn.receiver(ADDR, capacity=UNLIMITED)
+ rcv3 = self.ssn.receiver(ADDR, capacity=UNLIMITED)
+
+ snd = self.ssn.sender(ADDR)
+
+ msgs = []
+ for i in range(10):
+ content = self.content("testNextReceiver", i)
+ snd.send(content)
+ msgs.append(content)
+
+ fetched = []
+ try:
+ while True:
+ rcv = self.ssn.next_receiver(timeout=self.delay())
+ assert rcv in (rcv1, rcv2, rcv3)
+ assert rcv.pending() > 0
+ fetched.append(rcv.fetch().content)
+ except Empty:
+ pass
+ assert msgs == fetched, "expecting %s, got %s" % (msgs, fetched)
+ self.ssn.acknowledge()
+
+ # XXX, we need a convenient way to assert that required queues are
+ # empty on setup, and possibly also to drain queues on teardown
+ def ackTest(self, acker, ack_capacity=None):
+ # send a bunch of messages
+ snd = self.ssn.sender(ACK_QC, durable=self.durable())
+ contents = [self.content("ackTest", i) for i in range(15)]
+ for c in contents:
+ snd.send(c)
+
+ # drain the queue, verify the messages are there and then close
+ # without acking
+ rcv = self.ssn.receiver(ACK_QC)
+ self.drain(rcv, expected=contents)
+ self.ssn.close()
+
+    # drain the queue again, verify that all the messages were
+    # requeued, and ack this time before closing
+ self.ssn = self.conn.session()
+ if ack_capacity is not None:
+ self.ssn.ack_capacity = ack_capacity
+ rcv = self.ssn.receiver(ACK_QC)
+ self.drain(rcv, expected=contents)
+ acker(self.ssn)
+ self.ssn.close()
+
+ # drain the queue a final time and verify that the messages were
+ # dequeued
+ self.ssn = self.conn.session()
+ rcv = self.ssn.receiver(ACK_QD)
+ self.assertEmpty(rcv)
+
+ def testAcknowledge(self):
+ self.ackTest(lambda ssn: ssn.acknowledge())
+
+ def testAcknowledgeAsync(self):
+ self.ackTest(lambda ssn: ssn.acknowledge(sync=False))
+
+ def testAcknowledgeAsyncAckCap0(self):
+ try:
+ try:
+ self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 0)
+ assert False, "acknowledge shouldn't succeed with ack_capacity of zero"
+ except InsufficientCapacity:
+ pass
+ finally:
+ self.ssn.ack_capacity = UNLIMITED
+ self.drain(self.ssn.receiver(ACK_QD))
+ self.ssn.acknowledge()
+
+ def testAcknowledgeAsyncAckCap1(self):
+ self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 1)
+
+ def testAcknowledgeAsyncAckCap5(self):
+ self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 5)
+
+ def testAcknowledgeAsyncAckCapUNLIMITED(self):
+ self.ackTest(lambda ssn: ssn.acknowledge(sync=False), UNLIMITED)
+
+ def send(self, ssn, queue, base, count=1):
+ snd = ssn.sender(queue, durable=self.durable())
+ contents = []
+ for i in range(count):
+ c = self.content(base, i)
+ snd.send(c)
+ contents.append(c)
+ snd.close()
+ return contents
+
+ def txTest(self, commit):
+ TX_Q = 'test-tx-queue; {create: sender, delete: receiver}'
+ TX_Q_COPY = 'test-tx-queue-copy; {create: always, delete: always}'
+ txssn = self.conn.session(transactional=True)
+ contents = self.send(self.ssn, TX_Q, "txTest", 3)
+ txrcv = txssn.receiver(TX_Q)
+ txsnd = txssn.sender(TX_Q_COPY, durable=self.durable())
+ rcv = self.ssn.receiver(txrcv.source)
+ copy_rcv = self.ssn.receiver(txsnd.target)
+ self.assertEmpty(copy_rcv)
+ for i in range(3):
+ m = txrcv.fetch(0)
+ txsnd.send(m)
+ self.assertEmpty(copy_rcv)
+ txssn.acknowledge()
+ if commit:
+ txssn.commit()
+ self.assertEmpty(rcv)
+ assert contents == self.drain(copy_rcv)
+ else:
+ txssn.rollback()
+ assert contents == self.drain(rcv)
+ self.assertEmpty(copy_rcv)
+ self.ssn.acknowledge()
+
+ def testCommit(self):
+ self.txTest(True)
+
+ def testRollback(self):
+ self.txTest(False)
+
+ def txTestSend(self, commit):
+ TX_SEND_Q = 'test-tx-send-queue; {create: sender, delete: receiver}'
+ txssn = self.conn.session(transactional=True)
+ contents = self.send(txssn, TX_SEND_Q, "txTestSend", 3)
+ rcv = self.ssn.receiver(TX_SEND_Q)
+ self.assertEmpty(rcv)
+
+ if commit:
+ txssn.commit()
+ assert contents == self.drain(rcv)
+ self.ssn.acknowledge()
+ else:
+ txssn.rollback()
+ self.assertEmpty(rcv)
+ txssn.commit()
+ self.assertEmpty(rcv)
+
+ def testCommitSend(self):
+ self.txTestSend(True)
+
+ def testRollbackSend(self):
+ self.txTestSend(False)
+
+ def txTestAck(self, commit):
+ TX_ACK_QC = 'test-tx-ack-queue; {create: always}'
+ TX_ACK_QD = 'test-tx-ack-queue; {delete: always}'
+ txssn = self.conn.session(transactional=True)
+ txrcv = txssn.receiver(TX_ACK_QC)
+ self.assertEmpty(txrcv)
+ contents = self.send(self.ssn, TX_ACK_QC, "txTestAck", 3)
+ assert contents == self.drain(txrcv)
+
+ if commit:
+ txssn.acknowledge()
+ else:
+ txssn.rollback()
+ drained = self.drain(txrcv)
+ assert contents == drained, "expected %s, got %s" % (contents, drained)
+ txssn.acknowledge()
+ txssn.rollback()
+ assert contents == self.drain(txrcv)
+ txssn.commit() # commit without ack
+ self.assertEmpty(txrcv)
+
+ txssn.close()
+
+ txssn = self.conn.session(transactional=True)
+ txrcv = txssn.receiver(TX_ACK_QC)
+ assert contents == self.drain(txrcv)
+ txssn.acknowledge()
+ txssn.commit()
+ rcv = self.ssn.receiver(TX_ACK_QD)
+ self.assertEmpty(rcv)
+ txssn.close()
+ self.assertEmpty(rcv)
+
+ def testCommitAck(self):
+ self.txTestAck(True)
+
+ def testRollbackAck(self):
+ self.txTestAck(False)
+
+ def testClose(self):
+ self.ssn.close()
+ try:
+ self.ping(self.ssn)
+ assert False, "ping succeeded"
+ except Disconnected:
+ pass
+
+RECEIVER_Q = 'test-receiver-queue; {create: always, delete: always}'
+
+class ReceiverTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def setup_sender(self):
+ return self.ssn.sender(RECEIVER_Q)
+
+ def setup_receiver(self):
+ return self.ssn.receiver(RECEIVER_Q)
+
+ def send(self, base, count = None):
+ content = self.content(base, count)
+ self.snd.send(content)
+ return content
+
+ def testFetch(self):
+ try:
+ msg = self.rcv.fetch(0)
+ assert False, "unexpected message: %s" % msg
+ except Empty:
+ pass
+ try:
+ start = time.time()
+ msg = self.rcv.fetch(self.delay())
+ assert False, "unexpected message: %s" % msg
+ except Empty:
+ elapsed = time.time() - start
+ assert elapsed >= self.delay()
+
+ one = self.send("testFetch", 1)
+ two = self.send("testFetch", 2)
+ three = self.send("testFetch", 3)
+ msg = self.rcv.fetch(0)
+ assert msg.content == one
+ msg = self.rcv.fetch(self.delay())
+ assert msg.content == two
+ msg = self.rcv.fetch()
+ assert msg.content == three
+ self.ssn.acknowledge()
+
+ def testCapacityIncrease(self):
+ content = self.send("testCapacityIncrease")
+ self.sleep()
+ assert self.rcv.pending() == 0
+ self.rcv.capacity = UNLIMITED
+ self.sleep()
+ assert self.rcv.pending() == 1
+ msg = self.rcv.fetch(0)
+ assert msg.content == content
+ assert self.rcv.pending() == 0
+ self.ssn.acknowledge()
+
+ def testCapacityDecrease(self):
+ self.rcv.capacity = UNLIMITED
+ one = self.send("testCapacityDecrease", 1)
+ self.sleep()
+ assert self.rcv.pending() == 1
+ msg = self.rcv.fetch(0)
+ assert msg.content == one
+
+ self.rcv.capacity = 0
+
+ two = self.send("testCapacityDecrease", 2)
+ self.sleep()
+ assert self.rcv.pending() == 0
+ msg = self.rcv.fetch(0)
+ assert msg.content == two
+
+ self.ssn.acknowledge()
+
+ def testCapacity(self):
+ self.rcv.capacity = 5
+ self.assertPending(self.rcv, 0)
+
+ for i in range(15):
+ self.send("testCapacity", i)
+ self.sleep()
+ self.assertPending(self.rcv, 5)
+
+ self.drain(self.rcv, limit = 5)
+ self.sleep()
+ self.assertPending(self.rcv, 5)
+
+ drained = self.drain(self.rcv)
+ assert len(drained) == 10, "%s, %s" % (len(drained), drained)
+ self.assertPending(self.rcv, 0)
+
+ self.ssn.acknowledge()
+
+ def testCapacityUNLIMITED(self):
+ self.rcv.capacity = UNLIMITED
+ self.assertPending(self.rcv, 0)
+
+ for i in range(10):
+ self.send("testCapacityUNLIMITED", i)
+ self.sleep()
+ self.assertPending(self.rcv, 10)
+
+ self.drain(self.rcv)
+ self.assertPending(self.rcv, 0)
+
+ self.ssn.acknowledge()
+
+ def testPending(self):
+ self.rcv.capacity = UNLIMITED
+ assert self.rcv.pending() == 0
+
+ for i in range(3):
+ self.send("testPending", i)
+ self.sleep()
+ assert self.rcv.pending() == 3
+
+ for i in range(3, 10):
+ self.send("testPending", i)
+ self.sleep()
+ assert self.rcv.pending() == 10
+
+ self.drain(self.rcv, limit=3)
+ assert self.rcv.pending() == 7
+
+ self.drain(self.rcv)
+ assert self.rcv.pending() == 0
+
+ self.ssn.acknowledge()
+
+ # XXX: need testClose
+
+class AddressTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def testBadOption(self):
+ snd = self.ssn.sender("test-bad-option; {create: always, node-properties: {this-property-does-not-exist: 3}}")
+ try:
+ snd.send("ping")
+ except SendError, e:
+ assert "unrecognized option" in str(e)
+
+ def testCreateQueue(self):
+ snd = self.ssn.sender("test-create-queue; {create: always, delete: always, "
+ "node-properties: {type: queue, durable: False, "
+ "x-properties: {auto_delete: true}}}")
+ content = self.content("testCreateQueue")
+ snd.send(content)
+ rcv = self.ssn.receiver("test-create-queue")
+ self.drain(rcv, expected=[content])
+
+ def createExchangeTest(self, props=""):
+ addr = """test-create-exchange; {
+ create: always,
+ delete: always,
+ node-properties: {
+ type: topic,
+ durable: False,
+ x-properties: {auto_delete: true, %s}
+ }
+ }""" % props
+ snd = self.ssn.sender(addr)
+ snd.send("ping")
+ rcv1 = self.ssn.receiver("test-create-exchange/first")
+ rcv2 = self.ssn.receiver("test-create-exchange/first")
+ rcv3 = self.ssn.receiver("test-create-exchange/second")
+ for r in (rcv1, rcv2, rcv3):
+ try:
+ r.fetch(0)
+ assert False
+ except Empty:
+ pass
+ msg1 = Message(self.content("testCreateExchange", 1), subject="first")
+ msg2 = Message(self.content("testCreateExchange", 2), subject="second")
+ snd.send(msg1)
+ snd.send(msg2)
+ self.drain(rcv1, expected=[msg1.content])
+ self.drain(rcv2, expected=[msg1.content])
+ self.drain(rcv3, expected=[msg2.content])
+
+ def testCreateExchange(self):
+ self.createExchangeTest()
+
+ def testCreateExchangeDirect(self):
+ self.createExchangeTest("type: direct")
+
+ def testCreateExchangeTopic(self):
+ self.createExchangeTest("type: topic")
+
+ def testDeleteBySender(self):
+ snd = self.ssn.sender("test-delete; {create: always}")
+ snd.send("ping")
+ snd.close()
+ snd = self.ssn.sender("test-delete; {delete: always}")
+ snd.send("ping")
+ snd.close()
+ snd = self.ssn.sender("test-delete")
+ try:
+ snd.send("ping")
+ except SendError, e:
+ assert "no such queue" in str(e)
+
+ def testDeleteByReceiver(self):
+ rcv = self.ssn.receiver("test-delete; {create: always, delete: always}")
+ try:
+ rcv.fetch(0)
+ except Empty:
+ pass
+ rcv.close()
+
+ try:
+ self.ssn.receiver("test-delete")
+ except SendError, e:
+ assert "no such queue" in str(e)
+
+ def testDeleteSpecial(self):
+ snd = self.ssn.sender("amq.topic; {delete: always}")
+ snd.send("asdf")
+ try:
+ snd.close()
+ except SessionError, e:
+ assert "Cannot delete default exchange" in str(e)
+ # XXX: need to figure out close after error
+ self.conn._remove_session(self.ssn)
+
+ def testBindings(self):
+ snd = self.ssn.sender("""
+test-bindings-queue; {
+ create: always,
+ delete: always,
+ node-properties: {
+ x-properties: {
+ bindings: ["amq.topic/a.#", "amq.direct/b", "amq.topic/c.*"]
+ }
+ }
+}
+""")
+ snd.send("one")
+ snd_a = self.ssn.sender("amq.topic/a.foo")
+ snd_b = self.ssn.sender("amq.direct/b")
+ snd_c = self.ssn.sender("amq.topic/c.bar")
+ snd_a.send("two")
+ snd_b.send("three")
+ snd_c.send("four")
+ rcv = self.ssn.receiver("test-bindings-queue")
+ self.drain(rcv, expected=["one", "two", "three", "four"])
+
+NOSUCH_Q = "this-queue-should-not-exist"
+UNPARSEABLE_ADDR = "name/subject; {bad options"
+UNLEXABLE_ADDR = "\0x0\0x1\0x2\0x3"
+
+class AddressErrorTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def sendErrorTest(self, addr, exc, check=lambda e: True):
+ snd = self.ssn.sender(addr, durable=self.durable())
+ try:
+ snd.send("hello")
+ assert False, "send succeeded"
+ except exc, e:
+ assert check(e), "unexpected error: %s" % compat.format_exc(e)
+ snd.close()
+
+ def fetchErrorTest(self, addr, exc, check=lambda e: True):
+ rcv = self.ssn.receiver(addr)
+ try:
+ rcv.fetch(timeout=0)
+ assert False, "fetch succeeded"
+ except exc, e:
+ assert check(e), "unexpected error: %s" % compat.format_exc(e)
+ rcv.close()
+
+ def testNoneTarget(self):
+ # XXX: should have specific exception for this
+ self.sendErrorTest(None, SendError)
+
+ def testNoneSource(self):
+ # XXX: should have specific exception for this
+ self.fetchErrorTest(None, ReceiveError)
+
+ def testNoTarget(self):
+ # XXX: should have specific exception for this
+ self.sendErrorTest(NOSUCH_Q, SendError, lambda e: NOSUCH_Q in str(e))
+
+ def testNoSource(self):
+ # XXX: should have specific exception for this
+ self.fetchErrorTest(NOSUCH_Q, ReceiveError, lambda e: NOSUCH_Q in str(e))
+
+ def testUnparseableTarget(self):
+ # XXX: should have specific exception for this
+ self.sendErrorTest(UNPARSEABLE_ADDR, SendError,
+ lambda e: "expecting COLON" in str(e))
+
+ def testUnparseableSource(self):
+ # XXX: should have specific exception for this
+ self.fetchErrorTest(UNPARSEABLE_ADDR, ReceiveError,
+ lambda e: "expecting COLON" in str(e))
+
+ def testUnlexableTarget(self):
+ # XXX: should have specific exception for this
+ self.sendErrorTest(UNLEXABLE_ADDR, SendError,
+ lambda e: "unrecognized characters" in str(e))
+
+ def testUnlexableSource(self):
+ # XXX: should have specific exception for this
+ self.fetchErrorTest(UNLEXABLE_ADDR, ReceiveError,
+ lambda e: "unrecognized characters" in str(e))
+
+SENDER_Q = 'test-sender-q; {create: always, delete: always}'
+
+class SenderTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def setup_sender(self):
+ return self.ssn.sender(SENDER_Q)
+
+ def setup_receiver(self):
+ return self.ssn.receiver(SENDER_Q)
+
+ def checkContent(self, content):
+ self.snd.send(content)
+ msg = self.rcv.fetch(0)
+ assert msg.content == content
+
+ out = Message(content)
+ self.snd.send(out)
+ echo = self.rcv.fetch(0)
+ assert out.content == echo.content
+ assert echo.content == msg.content
+ self.ssn.acknowledge()
+
+ def testSendString(self):
+ self.checkContent(self.content("testSendString"))
+
+ def testSendList(self):
+ self.checkContent(["testSendList", 1, 3.14, self.test_id])
+
+ def testSendMap(self):
+ self.checkContent({"testSendMap": self.test_id, "pie": "blueberry", "pi": 3.14})
+
+ def asyncTest(self, capacity):
+ self.snd.capacity = capacity
+ msgs = [self.content("asyncTest", i) for i in range(15)]
+ for m in msgs:
+ self.snd.send(m, sync=False)
+ drained = self.drain(self.rcv, timeout=self.delay())
+ assert msgs == drained, "expected %s, got %s" % (msgs, drained)
+ self.ssn.acknowledge()
+
+ def testSendAsyncCapacity0(self):
+ try:
+ self.asyncTest(0)
+ assert False, "send shouldn't succeed with zero capacity"
+ except InsufficientCapacity:
+ # this is expected
+ pass
+
+ def testSendAsyncCapacity1(self):
+ self.asyncTest(1)
+
+ def testSendAsyncCapacity5(self):
+ self.asyncTest(5)
+
+ def testSendAsyncCapacityUNLIMITED(self):
+ self.asyncTest(UNLIMITED)
+
+ def testCapacityTimeout(self):
+ self.snd.capacity = 1
+ msgs = []
+ caught = False
+ while len(msgs) < 100:
+ m = self.content("testCapacity", len(msgs))
+ try:
+ self.snd.send(m, sync=False, timeout=0)
+ msgs.append(m)
+ except InsufficientCapacity:
+ caught = True
+ break
+ self.snd.sync()
+ self.drain(self.rcv, expected=msgs)
+ self.ssn.acknowledge()
+ assert caught, "did not exceed capacity"
+
+class MessageTests(Base):
+
+ def testCreateString(self):
+ m = Message("string")
+ assert m.content == "string"
+ assert m.content_type is None
+
+ def testCreateUnicode(self):
+ m = Message(u"unicode")
+ assert m.content == u"unicode"
+ assert m.content_type == "text/plain"
+
+ def testCreateMap(self):
+ m = Message({})
+ assert m.content == {}
+ assert m.content_type == "amqp/map"
+
+ def testCreateList(self):
+ m = Message([])
+ assert m.content == []
+ assert m.content_type == "amqp/list"
+
+ def testContentTypeOverride(self):
+ m = Message()
+ m.content_type = "text/html; charset=utf8"
+ m.content = u"<html/>"
+ assert m.content_type == "text/html; charset=utf8"
+
+ECHO_Q = 'test-message-echo-queue; {create: always, delete: always}'
+
+class MessageEchoTests(Base):
+
+ def setup_connection(self):
+ return Connection.open(self.broker.host, self.broker.port,
+ reconnect=self.reconnect())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def setup_sender(self):
+ return self.ssn.sender(ECHO_Q)
+
+ def setup_receiver(self):
+ return self.ssn.receiver(ECHO_Q)
+
+ def check(self, msg):
+ self.snd.send(msg)
+ echo = self.rcv.fetch(0)
+
+ assert msg.id == echo.id
+ assert msg.subject == echo.subject
+ assert msg.user_id == echo.user_id
+ assert msg.to == echo.to
+ assert msg.reply_to == echo.reply_to
+ assert msg.correlation_id == echo.correlation_id
+ assert msg.properties == echo.properties
+ assert msg.content_type == echo.content_type
+ assert msg.content == echo.content, "%s, %s" % (msg, echo)
+
+ self.ssn.acknowledge(echo)
+
+ def testStringContent(self):
+ self.check(Message("string"))
+
+ def testUnicodeContent(self):
+ self.check(Message(u"unicode"))
+
+
+ TEST_MAP = {"key1": "string",
+ "key2": u"unicode",
+ "key3": 3,
+ "key4": -3,
+ "key5": 3.14,
+ "key6": -3.14,
+ "key7": ["one", 2, 3.14],
+ "key8": [],
+ "key9": {"sub-key0": 3}}
+
+ def testMapContent(self):
+ self.check(Message(MessageEchoTests.TEST_MAP))
+
+ def testListContent(self):
+ self.check(Message([]))
+ self.check(Message([1, 2, 3]))
+ self.check(Message(["one", 2, 3.14, {"four": 4}]))
+
+ def testProperties(self):
+ msg = Message()
+ msg.to = "to-address"
+ msg.subject = "subject"
+ msg.correlation_id = str(self.test_id)
+ msg.properties = MessageEchoTests.TEST_MAP
+ msg.reply_to = "reply-address"
+ self.check(msg)
+
+class TestTestsXXX(Test):
+
+ def testFoo(self):
+ print "this test has output"
+
+ def testBar(self):
+ print "this test "*8
+ print "has"*10
+ print "a"*75
+ print "lot of"*10
+ print "output"*10
+
+ def testQux(self):
+ import sys
+ sys.stdout.write("this test has output with no newline")
+
+ def testQuxFail(self):
+ import sys
+ sys.stdout.write("this test has output with no newline")
+ fdsa
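
For orientation, the tests above exercise the address-based messaging API end to end: Connection.open, session, sender/receiver, fetch and acknowledge. A minimal send/receive sketch built only from those calls, assuming the classes are importable from qpid.messaging and that a broker listens on localhost:5672 (both the import path and the host/port are illustrative assumptions, not something this patch asserts):

    from qpid.messaging import Connection, Message, Empty   # import path assumed

    conn = Connection.open("localhost", 5672)                # illustrative host/port
    ssn = conn.session()
    snd = ssn.sender("example-queue; {create: always, delete: always}")
    rcv = ssn.receiver("example-queue")

    snd.send(Message("hello"))           # enqueue one message
    try:
      msg = rcv.fetch(timeout=10)        # wait up to 10s for it
      print msg.content
      ssn.acknowledge()                  # ack everything fetched on this session
    except Empty:
      print "nothing arrived"
    ssn.close()
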
diff --git a/python/qpid/tests/mimetype.py b/python/qpid/tests/mimetype.py
new file mode 100644
index 0000000000..22760316f0
--- /dev/null
+++ b/python/qpid/tests/mimetype.py
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests import Test
+from qpid.mimetype import lex, parse, ParseError, EOF, WSPACE
+from parser import ParserBase
+
+class MimeTypeTests(ParserBase, Test):
+
+ EXCLUDE = (WSPACE, EOF)
+
+ def do_lex(self, st):
+ return lex(st)
+
+ def do_parse(self, st):
+ return parse(st)
+
+ def valid(self, addr, type=None, subtype=None, parameters=None):
+ ParserBase.valid(self, addr, (type, subtype, parameters))
+
+ def testTypeOnly(self):
+ self.invalid("type", "expecting SLASH, got EOF line:1,4:type")
+
+ def testTypeSubtype(self):
+ self.valid("type/subtype", "type", "subtype", [])
+
+ def testTypeSubtypeParam(self):
+ self.valid("type/subtype ; name=value",
+ "type", "subtype", [("name", "value")])
+
+ def testTypeSubtypeParamComment(self):
+ self.valid("type/subtype ; name(This is a comment.)=value",
+ "type", "subtype", [("name", "value")])
+
+ def testMultipleParams(self):
+ self.valid("type/subtype ; name1=value1 ; name2=value2",
+ "type", "subtype", [("name1", "value1"), ("name2", "value2")])
+
+ def testCaseInsensitivity(self):
+ self.valid("Type/Subtype", "type", "subtype", [])
diff --git a/python/qpid/tests/parser.py b/python/qpid/tests/parser.py
new file mode 100644
index 0000000000..a4865cc9fe
--- /dev/null
+++ b/python/qpid/tests/parser.py
@@ -0,0 +1,37 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.parser import ParseError
+
+class ParserBase:
+
+ def lex(self, addr, *types):
+ toks = [t.type for t in self.do_lex(addr) if t.type not in self.EXCLUDE]
+ assert list(types) == toks, "expected %s, got %s" % (types, toks)
+
+ def valid(self, addr, expected):
+ got = self.do_parse(addr)
+ assert expected == got, "expected %s, got %s" % (expected, got)
+
+ def invalid(self, addr, error=None):
+ try:
+ p = self.do_parse(addr)
+ assert False, "invalid address parsed: %s" % p
+ except ParseError, e:
+ assert error == str(e), "expected %r, got %r" % (error, str(e))
diff --git a/python/qpid/util.py b/python/qpid/util.py
index 1140cbe5ef..3409d777f9 100644
--- a/python/qpid/util.py
+++ b/python/qpid/util.py
@@ -17,7 +17,26 @@
# under the License.
#
-import os, socket, time, textwrap
+import os, socket, time, textwrap, re
+
+try:
+ from ssl import wrap_socket as ssl
+except ImportError:
+ from socket import ssl as wrap_socket
+ class ssl:
+
+ def __init__(self, sock):
+ self.sock = sock
+ self.ssl = wrap_socket(sock)
+
+ def recv(self, n):
+ return self.ssl.read(n)
+
+ def send(self, s):
+ return self.ssl.write(s)
+
+ def close(self):
+ self.sock.close()
def connect(host, port):
sock = socket.socket()
@@ -32,8 +51,8 @@ def listen(host, port, predicate = lambda: True, bound = lambda: None):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
- bound()
sock.listen(5)
+ bound()
while predicate():
s, a = sock.accept()
yield s
@@ -48,7 +67,9 @@ def wait(condition, predicate, timeout=None):
start = time.time()
while not predicate():
if timeout is None:
- condition.wait()
+ # using the timed wait prevents keyboard interrupts from being
+ # blocked while waiting
+ condition.wait(3)
elif passed < timeout:
condition.wait(timeout - passed)
else:
@@ -76,3 +97,46 @@ def fill(text, indent, heading = None):
init = sub
w = textwrap.TextWrapper(initial_indent = init, subsequent_indent = sub)
return w.fill(" ".join(text.split()))
+
+class URL:
+
+ RE = re.compile(r"""
+ # [ <scheme>:// ] [ <user> [ / <password> ] @] <host> [ :<port> ]
+ ^ (?: ([^:/@]+)://)? (?: ([^:/@]+) (?: / ([^:/@]+) )? @)? ([^@:/]+) (?: :([0-9]+))?$
+""", re.X)
+
+ AMQPS = "amqps"
+ AMQP = "amqp"
+
+ def __init__(self, s):
+ match = URL.RE.match(s)
+ if match is None:
+ raise ValueError(s)
+ self.scheme, self.user, self.password, self.host, port = match.groups()
+ if port is None:
+ self.port = None
+ else:
+ self.port = int(port)
+
+ def __repr__(self):
+ return "URL(%r)" % str(self)
+
+ def __str__(self):
+ s = ""
+ if self.scheme:
+ s += "%s://" % self.scheme
+ if self.user:
+ s += self.user
+ if self.password:
+ s += "/%s" % self.password
+ s += "@"
+ s += self.host
+ if self.port:
+ s += ":%s" % self.port
+ return s
+
+def default(value, default):
+ if value is None:
+ return default
+ else:
+ return value
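
The new URL helper parses addresses of the form [scheme://][user[/password]@]host[:port] and reconstructs them via __str__. A short sketch of that round trip (the address itself is made up for illustration):

    from qpid.util import URL

    u = URL("amqp://guest/guest@example.org:5672")
    # scheme='amqp', user='guest', password='guest', host='example.org', port=5672 (an int)
    assert str(u) == "amqp://guest/guest@example.org:5672"

    try:
      URL("amqp://")      # no host, so the pattern does not match
    except ValueError:
      pass
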
diff --git a/python/qpid_config.py b/python/qpid_config.py
index 8f987e9962..d740a53dfe 100644
--- a/python/qpid_config.py
+++ b/python/qpid_config.py
@@ -19,5 +19,7 @@
import os
-qpid_home = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-amqp_spec = os.path.join(qpid_home, "specs", "amqp.0-10-qpid-errata.xml")
+AMQP_SPEC_DIR=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "specs")
+amqp_spec = os.path.join(AMQP_SPEC_DIR, "amqp.0-10-qpid-errata.xml")
+amqp_spec_0_8 = os.path.join(AMQP_SPEC_DIR, "amqp.0-8.xml")
+amqp_spec_0_9 = os.path.join(AMQP_SPEC_DIR, "amqp.0-9.xml")
diff --git a/python/rule2test b/python/rule2test
deleted file mode 100755
index 10f151366e..0000000000
--- a/python/rule2test
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# Convert rules to tests
-#
-import sys, re, os.path
-from getopt import getopt, GetoptError
-from string import capitalize
-from xml import dom
-from xml.dom.minidom import parse
-
-def camelcase(s):
- """Convert 'string like this' to 'StringLikeThis'"""
- return "".join([capitalize(w) for w in re.split(re.compile("\W*"), s)])
-
-def uncapitalize(s): return s[0].lower()+s[1:]
-
-def ancestors(node):
- "Return iterator of ancestors from top-level element to node"
- def generator(node):
- while node and node.parentNode:
- yield node
- node = node.parentNode
- return reversed(list(generator(node)))
-
-def tagAndName(element):
- nameAttr = element.getAttribute("name");
- if (nameAttr) : return camelcase(nameAttr) + camelcase(element.tagName)
- else: return camelcase(element.tagName)
-
-def nodeText(n):
- """Recursively collect text from all text nodes under n"""
- if n.nodeType == dom.Node.TEXT_NODE:
- return n.data
- if n.childNodes:
- return reduce(lambda t, c: t + nodeText(c), n.childNodes, "")
- return ""
-
-def cleanup(docString, level=8):
- unindent = re.sub("\n[ \t]*", "\n", docString.strip())
- emptyLines = re.sub("\n\n\n", "\n\n", unindent)
- indented = re.sub("\n", "\n"+level*" ", emptyLines)
- return level*" " + indented
-
-def printTest(test, docstring):
- print "class %s(TestBase):" % test
- print ' """'
- print docstring
- print ' """'
- print
- print
-
-def printTests(doc, module):
- """Returns dictionary { classname : [ (methodname, docstring)* ] * }"""
- tests = {}
- rules = doc.getElementsByTagName("rule")
- for r in rules:
- path = list(ancestors(r))
- if module == path[1].getAttribute("name").lower():
- test = "".join(map(tagAndName, path[2:])) + "Tests"
- docstring = cleanup(nodeText(r), 4)
- printTest(test, docstring)
-
-def usage(message=None):
- if message: print >>sys.stderr, message
- print >>sys.stderr, """
-rule2test [options] <amqpclass>
-
-Print test classes for each rule for the amqpclass in amqp.xml.
-
-Options:
- -?/-h/--help : this message
- -s/--spec <spec.xml> : file containing amqp XML spec
-"""
- return 1
-
-def main(argv):
- try: opts, args = getopt(argv[1:], "h?s:", ["help", "spec="])
- except GetoptError, e: return usage(e)
- spec = "../specs/amqp.xml" # Default
- for opt, val in opts:
- if (opt in ("-h", "-?", "--help")): return usage()
- if (opt in ("-s", "--spec")): spec = val
- doc = parse(spec)
- if len(args) == 0: return usage()
- printTests(doc, args[0])
- return 0
-
-if (__name__ == "__main__"): sys.exit(main(sys.argv))
diff --git a/python/server b/python/server
index 37416314e2..56edd38490 100755
--- a/python/server
+++ b/python/server
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
import qpid
from qpid.connection import Connection, listen
from qpid.delegate import Delegate
diff --git a/python/server010 b/python/server010
index 0a75e2534e..8dfcd7a585 100755
--- a/python/server010
+++ b/python/server010
@@ -1,4 +1,22 @@
#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
from qpid import delegates
from qpid.connection010 import Connection
diff --git a/python/setup.py b/python/setup.py
index a49fa6ca51..be8c7c2a03 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -19,7 +19,7 @@
#
from distutils.core import setup
-setup(name="qpid", version="0.1", packages=["qpid"], scripts=["amqp-doc"],
- url="http://incubator.apache.org/qpid",
+setup(name="qpid", version="0.6", packages=["qpid", "mllib"], scripts=["amqp-doc"],
+ url="http://qpid.apache.org/",
license="Apache Software License",
description="Python language client implementation for Apache Qpid")
diff --git a/python/tests/__init__.py b/python/tests/__init__.py
index 8ad514fc2f..1e495f3af3 100644
--- a/python/tests/__init__.py
+++ b/python/tests/__init__.py
@@ -19,12 +19,4 @@
# under the License.
#
-from codec import *
-from queue import *
-from spec import *
-from framer import *
-from assembler import *
-from datatypes import *
-from connection import *
-from spec010 import *
-from codec010 import *
+import codec, queue, datatypes, connection, spec010, codec010
diff --git a/python/tests/assembler.py b/python/tests/assembler.py
deleted file mode 100644
index b76924e59d..0000000000
--- a/python/tests/assembler.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from threading import *
-from unittest import TestCase
-from qpid.util import connect, listen
-from qpid.assembler import *
-
-PORT = 1234
-
-class AssemblerTest(TestCase):
-
- def setUp(self):
- started = Event()
- self.running = True
-
- def run():
- running = True
- for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()):
- asm = Assembler(s)
- try:
- asm.write_header(*asm.read_header()[-2:])
- while True:
- seg = asm.read_segment()
- asm.write_segment(seg)
- except Closed:
- pass
-
- self.server = Thread(target=run)
- self.server.setDaemon(True)
- self.server.start()
-
- started.wait(3)
-
- def tearDown(self):
- self.running = False
- self.server.join()
-
- def test(self):
- asm = Assembler(connect("0.0.0.0", PORT), max_payload = 1)
- asm.write_header(0, 10)
- asm.write_segment(Segment(True, False, 1, 2, 3, "TEST"))
- asm.write_segment(Segment(False, True, 1, 2, 3, "ING"))
-
- assert asm.read_header() == ("AMQP", 1, 1, 0, 10)
-
- seg = asm.read_segment()
- assert seg.first == True
- assert seg.last == False
- assert seg.type == 1
- assert seg.track == 2
- assert seg.channel == 3
- assert seg.payload == "TEST"
-
- seg = asm.read_segment()
- assert seg.first == False
- assert seg.last == True
- assert seg.type == 1
- assert seg.track == 2
- assert seg.channel == 3
- assert seg.payload == "ING"
diff --git a/python/tests/codec.py b/python/tests/codec.py
index 4bd3675af8..9b51b4713c 100644
--- a/python/tests/codec.py
+++ b/python/tests/codec.py
@@ -23,7 +23,6 @@ from qpid.codec import Codec
from qpid.spec import load
from cStringIO import StringIO
from qpid.reference import ReferenceId
-from qpid.testlib import testrunner
__doc__ = """
@@ -54,13 +53,8 @@ __doc__ = """
"""
-SPEC = None
-
-def spec():
- global SPEC
- if SPEC == None:
- SPEC = load(testrunner.get_spec_file("amqp.0-8.xml"))
- return SPEC
+from qpid_config import amqp_spec_0_8
+SPEC = load(amqp_spec_0_8)
# --------------------------------------
# --------------------------------------
@@ -76,7 +70,7 @@ class BaseDataTypes(unittest.TestCase):
"""
standard setUp for unitetest (refer unittest documentation for details)
"""
- self.codec = Codec(StringIO(), spec())
+ self.codec = Codec(StringIO(), SPEC)
# ------------------
def tearDown(self):
@@ -507,7 +501,7 @@ def test(type, value):
else:
values = [value]
stream = StringIO()
- codec = Codec(stream, spec())
+ codec = Codec(stream, SPEC)
for v in values:
codec.encode(type, v)
codec.flush()
diff --git a/python/tests/codec010.py b/python/tests/codec010.py
index 835966e103..787ebc146f 100644
--- a/python/tests/codec010.py
+++ b/python/tests/codec010.py
@@ -17,31 +17,54 @@
# under the License.
#
+import time
+
from unittest import TestCase
-from qpid.spec010 import load
from qpid.codec010 import StringCodec
-from qpid.testlib import testrunner
+from qpid.datatypes import timestamp, uuid4
+from qpid.ops import PRIMITIVE
class CodecTest(TestCase):
- def setUp(self):
- self.spec = load(testrunner.get_spec_file("amqp.0-10.xml"))
-
- def check(self, type, value):
- t = self.spec[type]
- sc = StringCodec(self.spec)
- t.encode(sc, value)
- decoded = t.decode(sc)
- assert decoded == value, "%s, %s" % (decoded, value)
+ def check(self, type, value, compare=True):
+ t = PRIMITIVE[type]
+ sc = StringCodec()
+ sc.write_primitive(t, value)
+ decoded = sc.read_primitive(t)
+ if compare:
+ assert decoded == value, "%s, %s" % (decoded, value)
+ return decoded
def testMapString(self):
self.check("map", {"string": "this is a test"})
+ def testMapUnicode(self):
+ self.check("map", {"unicode": u"this is a unicode test"})
+
+ def testMapBinary(self):
+ self.check("map", {"binary": "\x7f\xb4R^\xe5\xf0:\x89\x96E1\xf6\xfe\xb9\x1b\xf5"})
+
+ def testMapBuffer(self):
+ s = "\x7f\xb4R^\xe5\xf0:\x89\x96E1\xf6\xfe\xb9\x1b\xf5"
+ dec = self.check("map", {"buffer": buffer(s)}, False)
+ assert dec["buffer"] == s
+
def testMapInt(self):
self.check("map", {"int": 3})
def testMapLong(self):
self.check("map", {"long": 2**32})
+ self.check("map", {"long": 1 << 34})
+ self.check("map", {"long": -(1 << 34)})
+
+ def testMapTimestamp(self):
+ decoded = self.check("map", {"timestamp": timestamp(0)})
+ assert isinstance(decoded["timestamp"], timestamp)
+
+ def testMapDatetime(self):
+ decoded = self.check("map", {"datetime": timestamp(0).datetime()}, compare=False)
+ assert isinstance(decoded["datetime"], timestamp)
+ assert decoded["datetime"] == 0.0
def testMapNone(self):
self.check("map", {"none": None})
@@ -52,13 +75,21 @@ class CodecTest(TestCase):
def testMapList(self):
self.check("map", {"list": [1, "two", 3.0, -4]})
+ def testMapUUID(self):
+ self.check("map", {"uuid": uuid4()})
+
def testMapAll(self):
- self.check("map", {"string": "this is a test",
- "int": 3,
- "long": 2**32,
- "none": None,
- "map": {"string": "nested map"},
- "list": [1, "two", 3.0, -4]})
+ decoded = self.check("map", {"string": "this is a test",
+ "unicode": u"this is a unicode test",
+ "binary": "\x7f\xb4R^\xe5\xf0:\x89\x96E1\xf6\xfe\xb9\x1b\xf5",
+ "int": 3,
+ "long": 2**32,
+ "timestamp": timestamp(0),
+ "none": None,
+ "map": {"string": "nested map"},
+ "list": [1, "two", 3.0, -4],
+ "uuid": uuid4()})
+ assert isinstance(decoded["timestamp"], timestamp)
def testMapEmpty(self):
self.check("map", {})
@@ -86,3 +117,17 @@ class CodecTest(TestCase):
def testArrayNone(self):
self.check("array", None)
+
+ def testInt16(self):
+ self.check("int16", 3)
+ self.check("int16", -3)
+
+ def testInt64(self):
+ self.check("int64", 3)
+ self.check("int64", -3)
+ self.check("int64", 1<<34)
+ self.check("int64", -(1<<34))
+
+ def testDatetime(self):
+ self.check("datetime", timestamp(0))
+ self.check("datetime", timestamp(long(time.time())))
diff --git a/python/tests/connection.py b/python/tests/connection.py
index 23e0c937fb..8c00df56e1 100644
--- a/python/tests/connection.py
+++ b/python/tests/connection.py
@@ -22,11 +22,10 @@ from unittest import TestCase
from qpid.util import connect, listen
from qpid.connection import *
from qpid.datatypes import Message
-from qpid.testlib import testrunner
from qpid.delegates import Server
from qpid.queue import Queue
-from qpid.spec010 import load
from qpid.session import Delegate
+from qpid.ops import QueueQueryResult
PORT = 1234
@@ -52,23 +51,24 @@ class TestSession(Delegate):
pass
def queue_query(self, qq):
- return qq._type.result.type.new((qq.queue,), {})
+ return QueueQueryResult(qq.queue)
- def message_transfer(self, cmd, headers, body):
+ def message_transfer(self, cmd):
if cmd.destination == "echo":
- m = Message(body)
- m.headers = headers
+ m = Message(cmd.payload)
+ m.headers = cmd.headers
self.session.message_transfer(cmd.destination, cmd.accept_mode,
cmd.acquire_mode, m)
elif cmd.destination == "abort":
self.session.channel.connection.sock.close()
+ elif cmd.destination == "heartbeat":
+ self.session.channel.connection_heartbeat()
else:
- self.queue.put((cmd, headers, body))
+ self.queue.put(cmd)
class ConnectionTest(TestCase):
def setUp(self):
- self.spec = load(testrunner.get_spec_file("amqp.0-10.xml"))
self.queue = Queue()
self.running = True
started = Event()
@@ -76,7 +76,7 @@ class ConnectionTest(TestCase):
def run():
ts = TestServer(self.queue)
for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()):
- conn = Connection(s, self.spec, ts.connection)
+ conn = Connection(s, delegate=ts.connection)
try:
conn.start(5)
except Closed:
@@ -87,14 +87,15 @@ class ConnectionTest(TestCase):
self.server.start()
started.wait(3)
+ assert started.isSet()
def tearDown(self):
self.running = False
- connect("0.0.0.0", PORT).close()
+ connect("127.0.0.1", PORT).close()
self.server.join(3)
- def connect(self):
- return Connection(connect("0.0.0.0", PORT), self.spec)
+ def connect(self, **kwargs):
+ return Connection(connect("127.0.0.1", PORT), **kwargs)
def test(self):
c = self.connect()
@@ -133,17 +134,17 @@ class ConnectionTest(TestCase):
ssn.message_transfer(d)
for d in destinations:
- cmd, header, body = self.queue.get(10)
+ cmd = self.queue.get(10)
assert cmd.destination == d
- assert header == None
- assert body == None
+ assert cmd.headers == None
+ assert cmd.payload == None
msg = Message("this is a test")
ssn.message_transfer("four", message=msg)
- cmd, header, body = self.queue.get(10)
+ cmd = self.queue.get(10)
assert cmd.destination == "four"
- assert header == None
- assert body == msg.body
+ assert cmd.headers == None
+ assert cmd.payload == msg.body
qq = ssn.queue_query("asdf")
assert qq.queue == "asdf"
@@ -212,3 +213,10 @@ class ConnectionTest(TestCase):
s.auto_sync = False
s.message_transfer("echo", message=Message("test"))
s.sync(10)
+
+ def testHeartbeat(self):
+ c = self.connect(heartbeat=10)
+ c.start(10)
+ s = c.session("test")
+ s.channel.connection_heartbeat()
+ s.message_transfer("heartbeat")
diff --git a/python/tests/datatypes.py b/python/tests/datatypes.py
index ef98e81da0..00e649d6cf 100644
--- a/python/tests/datatypes.py
+++ b/python/tests/datatypes.py
@@ -18,23 +18,22 @@
#
from unittest import TestCase
-from qpid.testlib import testrunner
-from qpid.spec010 import load
from qpid.datatypes import *
+from qpid.ops import DeliveryProperties, FragmentProperties, MessageProperties
class SerialTest(TestCase):
def test(self):
- for s in (serial(0), serial(0x8FFFFFFF), serial(0xFFFFFFFF)):
+ for s in (serial(0), serial(0x8FFFFFFFL), serial(0xFFFFFFFFL)):
assert s + 1 > s
assert s - 1 < s
assert s < s + 1
assert s > s - 1
- assert serial(0xFFFFFFFF) + 1 == serial(0)
+ assert serial(0xFFFFFFFFL) + 1 == serial(0)
- assert min(serial(0xFFFFFFFF), serial(0x0)) == serial(0xFFFFFFFF)
- assert max(serial(0xFFFFFFFF), serial(0x0)) == serial(0x0)
+ assert min(serial(0xFFFFFFFFL), serial(0x0)) == serial(0xFFFFFFFFL)
+ assert max(serial(0xFFFFFFFFL), serial(0x0)) == serial(0x0)
def testIncr(self):
s = serial(0)
@@ -44,7 +43,7 @@ class SerialTest(TestCase):
def testIn(self):
l = [serial(1), serial(2), serial(3), serial(4)]
assert serial(1) in l
- assert serial(0xFFFFFFFF + 2) in l
+ assert serial(0xFFFFFFFFL + 2) in l
assert 4 in l
def testNone(self):
@@ -55,6 +54,19 @@ class SerialTest(TestCase):
d[serial(0)] = "zero"
assert d[0] == "zero"
+ def testAdd(self):
+ assert serial(2) + 2 == serial(4)
+ assert serial(2) + 2 == 4
+
+ def testSub(self):
+ delta = serial(4) - serial(2)
+ assert isinstance(delta, int) or isinstance(delta, long)
+ assert delta == 2
+
+ delta = serial(4) - 2
+ assert isinstance(delta, Serial)
+ assert delta == serial(2)
+
class RangedSetTest(TestCase):
def check(self, ranges):
@@ -136,6 +148,34 @@ class RangedSetTest(TestCase):
assert range.lower == 0
assert range.upper == 8
+ def testEmpty(self):
+ s = RangedSet()
+ assert s.empty()
+ s.add(0, -1)
+ assert s.empty()
+ s.add(0, 0)
+ assert not s.empty()
+
+ def testMinMax(self):
+ s = RangedSet()
+ assert s.max() is None
+ assert s.min() is None
+ s.add(0, 10)
+ assert s.max() == 10
+ assert s.min() == 0
+ s.add(0, 5)
+ assert s.max() == 10
+ assert s.min() == 0
+ s.add(0, 11)
+ assert s.max() == 11
+ assert s.min() == 0
+ s.add(15, 20)
+ assert s.max() == 20
+ assert s.min() == 0
+ s.add(-10, -5)
+ assert s.max() == 20
+ assert s.min() == -10
+
class RangeTest(TestCase):
def testIntersect1(self):
@@ -176,10 +216,9 @@ class UUIDTest(TestCase):
class MessageTest(TestCase):
def setUp(self):
- self.spec = load(testrunner.get_spec_file("amqp.0-10-qpid-errata.xml"))
- self.mp = Struct(self.spec["message.message_properties"])
- self.dp = Struct(self.spec["message.delivery_properties"])
- self.fp = Struct(self.spec["message.fragment_properties"])
+ self.mp = MessageProperties()
+ self.dp = DeliveryProperties()
+ self.fp = FragmentProperties()
def testHas(self):
m = Message(self.mp, self.dp, self.fp, "body")
@@ -207,7 +246,7 @@ class MessageTest(TestCase):
def testSetReplace(self):
m = Message(self.mp, self.dp, self.fp, "body")
- dp = Struct(self.spec["message.delivery_properties"])
+ dp = DeliveryProperties()
assert m.get("delivery_properties") == self.dp
assert m.get("delivery_properties") != dp
m.set(dp)
@@ -223,3 +262,35 @@ class MessageTest(TestCase):
assert m.get("fragment_properties") is None
assert m.get("message_properties") == self.mp
assert m.get("delivery_properties") == self.dp
+
+class TimestampTest(TestCase):
+
+ def check(self, expected, *values):
+ for v in values:
+ assert isinstance(v, timestamp)
+ assert v == expected
+ assert v == timestamp(expected)
+
+ def testAdd(self):
+ self.check(4.0,
+ timestamp(2.0) + 2.0,
+ 2.0 + timestamp(2.0))
+
+ def testSub(self):
+ self.check(2.0,
+ timestamp(4.0) - 2.0,
+ 4.0 - timestamp(2.0))
+
+ def testNeg(self):
+ self.check(-4.0, -timestamp(4.0))
+
+ def testPos(self):
+ self.check(+4.0, +timestamp(4.0))
+
+ def testAbs(self):
+ self.check(4.0, abs(timestamp(-4.0)))
+
+ def testConversion(self):
+ dt = timestamp(0).datetime()
+ t = timestamp(dt)
+ assert t == 0
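
The added datatype tests pin down timestamp arithmetic and the 32-bit wrap-around of serials. The same behaviour, using only operations the tests themselves exercise:

    from qpid.datatypes import timestamp, serial

    assert timestamp(2.0) + 2.0 == timestamp(4.0)      # arithmetic keeps the timestamp type
    assert timestamp(timestamp(0).datetime()) == 0     # datetime() round-trips
    assert serial(0xFFFFFFFFL) + 1 == serial(0)        # serials wrap at 2**32
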
diff --git a/python/tests/framer.py b/python/tests/framer.py
deleted file mode 100644
index 05bb467bbe..0000000000
--- a/python/tests/framer.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from threading import *
-from unittest import TestCase
-from qpid.util import connect, listen
-from qpid.framer import *
-
-PORT = 1234
-
-class FramerTest(TestCase):
-
- def setUp(self):
- self.running = True
- started = Event()
- def run():
- for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()):
- conn = Framer(s)
- try:
- conn.write_header(*conn.read_header()[-2:])
- while True:
- frame = conn.read_frame()
- conn.write_frame(frame)
- conn.flush()
- except Closed:
- pass
-
- self.server = Thread(target=run)
- self.server.setDaemon(True)
- self.server.start()
-
- started.wait(3)
-
- def tearDown(self):
- self.running = False
- self.server.join(3)
-
- def test(self):
- c = Framer(connect("0.0.0.0", PORT))
-
- c.write_header(0, 10)
- assert c.read_header() == ("AMQP", 1, 1, 0, 10)
-
- c.write_frame(Frame(FIRST_FRM, 1, 2, 3, "THIS"))
- c.write_frame(Frame(0, 1, 2, 3, "IS"))
- c.write_frame(Frame(0, 1, 2, 3, "A"))
- c.write_frame(Frame(LAST_FRM, 1, 2, 3, "TEST"))
- c.flush()
-
- f = c.read_frame()
- assert f.flags & FIRST_FRM
- assert not (f.flags & LAST_FRM)
- assert f.type == 1
- assert f.track == 2
- assert f.channel == 3
- assert f.payload == "THIS"
-
- f = c.read_frame()
- assert f.flags == 0
- assert f.type == 1
- assert f.track == 2
- assert f.channel == 3
- assert f.payload == "IS"
-
- f = c.read_frame()
- assert f.flags == 0
- assert f.type == 1
- assert f.track == 2
- assert f.channel == 3
- assert f.payload == "A"
-
- f = c.read_frame()
- assert f.flags & LAST_FRM
- assert not (f.flags & FIRST_FRM)
- assert f.type == 1
- assert f.track == 2
- assert f.channel == 3
- assert f.payload == "TEST"
diff --git a/python/tests/spec.py b/python/tests/spec.py
deleted file mode 100644
index ce03640493..0000000000
--- a/python/tests/spec.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from unittest import TestCase
-from qpid.spec import load
-from qpid.testlib import testrunner
-
-class SpecTest(TestCase):
-
- def check_load(self, *urls):
- spec = load(*map(testrunner.get_spec_file, urls))
- qdecl = spec.method("queue_declare")
- assert qdecl != None
- assert not qdecl.content
-
- queue = qdecl.fields.byname["queue"]
- assert queue != None
- assert queue.domain.name == "queue_name"
- assert queue.type == "shortstr"
-
- qdecl_ok = spec.method("queue_declare_ok")
-
- # 0-8 is actually 8-0
- if (spec.major == 8 and spec.minor == 0 or
- spec.major == 0 and spec.minor == 9):
- assert qdecl_ok != None
-
- assert len(qdecl.responses) == 1
- assert qdecl_ok in qdecl.responses
-
- publish = spec.method("basic_publish")
- assert publish != None
- assert publish.content
-
- if (spec.major == 0 and spec.minor == 10):
- assert qdecl_ok == None
- reply_to = spec.domains.byname["reply_to"]
- assert reply_to.type.size == 2
- assert reply_to.type.pack == 2
- assert len(reply_to.type.fields) == 2
-
- qq = spec.method("queue_query")
- assert qq != None
- assert qq.result.size == 4
- assert qq.result.type != None
- args = qq.result.fields.byname["arguments"]
- assert args.type == "table"
-
- def test_load_0_8(self):
- self.check_load("amqp.0-8.xml")
-
- def test_load_0_9(self):
- self.check_load("amqp.0-9.xml")
-
- def test_load_0_9_errata(self):
- self.check_load("amqp.0-9.xml", "amqp-errata.0-9.xml")
-
- def test_load_0_10(self):
- self.check_load("amqp.0-10-preview.xml")
diff --git a/python/tests/spec010.py b/python/tests/spec010.py
index df9cb9590a..ac04e1ee02 100644
--- a/python/tests/spec010.py
+++ b/python/tests/spec010.py
@@ -19,66 +19,56 @@
import os, tempfile, shutil, stat
from unittest import TestCase
-from qpid.spec010 import load
from qpid.codec010 import Codec, StringCodec
-from qpid.testlib import testrunner
-from qpid.datatypes import Struct
+from qpid.ops import *
class SpecTest(TestCase):
- def setUp(self):
- self.spec = load(testrunner.get_spec_file("amqp.0-10-qpid-errata.xml"))
-
def testSessionHeader(self):
- hdr = self.spec["session.header"]
- sc = StringCodec(self.spec)
- hdr.encode(sc, Struct(hdr, sync=True))
+ sc = StringCodec()
+ sc.write_compound(Header(sync=True))
assert sc.encoded == "\x01\x01"
- sc = StringCodec(self.spec)
- hdr.encode(sc, Struct(hdr, sync=False))
+ sc = StringCodec()
+ sc.write_compound(Header(sync=False))
assert sc.encoded == "\x01\x00"
- def encdec(self, type, value):
- sc = StringCodec(self.spec)
- type.encode(sc, value)
- decoded = type.decode(sc)
+ def encdec(self, value):
+ sc = StringCodec()
+ sc.write_compound(value)
+ decoded = sc.read_compound(value.__class__)
return decoded
def testMessageProperties(self):
- mp = self.spec["message.message_properties"]
- rt = self.spec["message.reply_to"]
-
- props = Struct(mp, content_length=3735928559L,
- reply_to=Struct(rt, exchange="the exchange name",
- routing_key="the routing key"))
- dec = self.encdec(mp, props)
+ props = MessageProperties(content_length=3735928559L,
+ reply_to=ReplyTo(exchange="the exchange name",
+ routing_key="the routing key"))
+ dec = self.encdec(props)
assert props.content_length == dec.content_length
assert props.reply_to.exchange == dec.reply_to.exchange
assert props.reply_to.routing_key == dec.reply_to.routing_key
def testMessageSubscribe(self):
- ms = self.spec["message.subscribe"]
- cmd = Struct(ms, exclusive=True, destination="this is a test")
- dec = self.encdec(self.spec["message.subscribe"], cmd)
+ cmd = MessageSubscribe(exclusive=True, destination="this is a test")
+ dec = self.encdec(cmd)
assert cmd.exclusive == dec.exclusive
assert cmd.destination == dec.destination
def testXid(self):
- xid = self.spec["dtx.xid"]
- sc = StringCodec(self.spec)
- st = Struct(xid, format=0, global_id="gid", branch_id="bid")
- xid.encode(sc, st)
+ sc = StringCodec()
+ xid = Xid(format=0, global_id="gid", branch_id="bid")
+ sc.write_compound(xid)
assert sc.encoded == '\x00\x00\x00\x10\x06\x04\x07\x00\x00\x00\x00\x00\x03gid\x03bid'
- assert xid.decode(sc).__dict__ == st.__dict__
+ dec = sc.read_compound(Xid)
+ assert xid.__dict__ == dec.__dict__
- def testLoadReadOnly(self):
- spec = "amqp.0-10-qpid-errata.xml"
- f = testrunner.get_spec_file(spec)
- dest = tempfile.mkdtemp()
- shutil.copy(f, dest)
- shutil.copy(os.path.join(os.path.dirname(f), "amqp.0-10.dtd"), dest)
- os.chmod(dest, stat.S_IRUSR | stat.S_IXUSR)
- fname = os.path.join(dest, spec)
- load(fname)
- assert not os.path.exists("%s.pcl" % fname)
+# def testLoadReadOnly(self):
+# spec = "amqp.0-10-qpid-errata.xml"
+# f = testrunner.get_spec_file(spec)
+# dest = tempfile.mkdtemp()
+# shutil.copy(f, dest)
+# shutil.copy(os.path.join(os.path.dirname(f), "amqp.0-10.dtd"), dest)
+# os.chmod(dest, stat.S_IRUSR | stat.S_IXUSR)
+# fname = os.path.join(dest, spec)
+# load(fname)
+# assert not os.path.exists("%s.pcl" % fname)
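
The spec tests now construct commands and structs directly from qpid.ops and push them through write_compound/read_compound instead of going via a loaded XML spec. A minimal round trip along the same lines:

    from qpid.codec010 import StringCodec
    from qpid.ops import MessageProperties, ReplyTo

    props = MessageProperties(content_length=3,
                              reply_to=ReplyTo(exchange="e", routing_key="k"))
    sc = StringCodec()
    sc.write_compound(props)
    decoded = sc.read_compound(MessageProperties)
    assert decoded.reply_to.routing_key == "k"
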
diff --git a/python/tests_0-10/__init__.py b/python/tests_0-10/__init__.py
index 1fd7f72357..f9315a6f90 100644
--- a/python/tests_0-10/__init__.py
+++ b/python/tests_0-10/__init__.py
@@ -24,6 +24,7 @@ from broker import *
from dtx import *
from example import *
from exchange import *
+from management import *
from message import *
from query import *
from queue import *
diff --git a/python/tests_0-10/alternate_exchange.py b/python/tests_0-10/alternate_exchange.py
index aac8a5e15b..4d8617eb8e 100644
--- a/python/tests_0-10/alternate_exchange.py
+++ b/python/tests_0-10/alternate_exchange.py
@@ -41,16 +41,16 @@ class AlternateExchangeTests(TestBase010):
session.queue_declare(queue="returns", exclusive=True, auto_delete=True)
session.exchange_bind(queue="returns", exchange="secondary")
session.message_subscribe(destination="a", queue="returns")
- session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
returned = session.incoming("a")
#declare, bind (to the primary exchange) and consume from a queue for 'processed' messages
session.queue_declare(queue="processed", exclusive=True, auto_delete=True)
session.exchange_bind(queue="processed", exchange="primary", binding_key="my-key")
session.message_subscribe(destination="b", queue="processed")
- session.message_flow(destination="b", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="b", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="b", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="b", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
processed = session.incoming("b")
#publish to the primary exchange
@@ -81,8 +81,8 @@ class AlternateExchangeTests(TestBase010):
session.queue_declare(queue="deleted", exclusive=True, auto_delete=True)
session.exchange_bind(exchange="dlq", queue="deleted")
session.message_subscribe(destination="dlq", queue="deleted")
- session.message_flow(destination="dlq", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="dlq", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="dlq", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="dlq", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
dlq = session.incoming("dlq")
#create a queue using the dlq as its alternate exchange:
@@ -141,7 +141,61 @@ class AlternateExchangeTests(TestBase010):
session.exchange_delete(exchange="e")
session.exchange_delete(exchange="alternate")
self.assertEquals(530, e.args[0].error_code)
-
+
+
+ def test_modify_existing_exchange_alternate(self):
+ """
+        Ensure that attempting to modify an exchange to change
+ the alternate throws an exception
+ """
+ session = self.session
+ session.exchange_declare(exchange="alt1", type="direct")
+ session.exchange_declare(exchange="alt2", type="direct")
+ session.exchange_declare(exchange="onealternate", type="fanout", alternate_exchange="alt1")
+ try:
+ # attempt to change the alternate on an already existing exchange
+ session.exchange_declare(exchange="onealternate", type="fanout", alternate_exchange="alt2")
+ self.fail("Expected changing an alternate on an existing exchange to fail")
+ except SessionException, e:
+ self.assertEquals(530, e.args[0].error_code)
+ session = self.conn.session("alternate", 2)
+ session.exchange_delete(exchange="onealternate")
+ session.exchange_delete(exchange="alt2")
+ session.exchange_delete(exchange="alt1")
+
+
+ def test_add_alternate_to_exchange(self):
+ """
+        Ensure that attempting to modify an exchange by adding
+ an alternate throws an exception
+ """
+ session = self.session
+ session.exchange_declare(exchange="alt1", type="direct")
+ session.exchange_declare(exchange="noalternate", type="fanout")
+ try:
+ # attempt to add an alternate on an already existing exchange
+ session.exchange_declare(exchange="noalternate", type="fanout", alternate_exchange="alt1")
+ self.fail("Expected adding an alternate on an existing exchange to fail")
+ except SessionException, e:
+ self.assertEquals(530, e.args[0].error_code)
+ session = self.conn.session("alternate", 2)
+ session.exchange_delete(exchange="noalternate")
+ session.exchange_delete(exchange="alt1")
+
+
+ def test_del_alternate_to_exchange(self):
+ """
+        Ensure that attempting to modify an exchange by declaring
+ it again without an alternate does nothing
+ """
+ session = self.session
+ session.exchange_declare(exchange="alt1", type="direct")
+ session.exchange_declare(exchange="onealternate", type="fanout", alternate_exchange="alt1")
+ # attempt to re-declare without an alternate - silently ignore
+ session.exchange_declare(exchange="onealternate", type="fanout" )
+ session.exchange_delete(exchange="onealternate")
+ session.exchange_delete(exchange="alt1")
+
def assertEmpty(self, queue):
try:
diff --git a/python/tests_0-10/broker.py b/python/tests_0-10/broker.py
index d4aa57765c..81d723e322 100644
--- a/python/tests_0-10/broker.py
+++ b/python/tests_0-10/broker.py
@@ -36,8 +36,8 @@ class BrokerTests(TestBase010):
# No ack consumer
ctag = "tag1"
session.message_subscribe(queue = "myqueue", destination = ctag)
- session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
body = "test no-ack"
session.message_transfer(message=Message(session.delivery_properties(routing_key="myqueue"), body))
msg = session.incoming(ctag).get(timeout = 5)
@@ -47,8 +47,8 @@ class BrokerTests(TestBase010):
session.queue_declare(queue = "otherqueue", exclusive=True, auto_delete=True)
ctag = "tag2"
session.message_subscribe(queue = "otherqueue", destination = ctag, accept_mode = 1)
- session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
body = "test ack"
session.message_transfer(message=Message(session.delivery_properties(routing_key="otherqueue"), body))
msg = session.incoming(ctag).get(timeout = 5)
@@ -64,8 +64,8 @@ class BrokerTests(TestBase010):
session.exchange_bind(queue="test-queue", exchange="amq.fanout")
consumer_tag = "tag1"
session.message_subscribe(queue="test-queue", destination=consumer_tag)
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = consumer_tag)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
queue = session.incoming(consumer_tag)
body = "Immediate Delivery"
@@ -86,8 +86,8 @@ class BrokerTests(TestBase010):
consumer_tag = "tag1"
session.message_subscribe(queue="test-queue", destination=consumer_tag)
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = consumer_tag)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
queue = session.incoming(consumer_tag)
msg = queue.get(timeout=5)
self.assert_(msg.body == body)
diff --git a/python/tests_0-10/dtx.py b/python/tests_0-10/dtx.py
index 25c2defd3b..2823385a3b 100644
--- a/python/tests_0-10/dtx.py
+++ b/python/tests_0-10/dtx.py
@@ -575,7 +575,7 @@ class DtxTests(TestBase010):
session2.dtx_start(xid=tx)
session2.message_subscribe(queue="dummy", destination="dummy")
session2.message_flow(destination="dummy", unit=session2.credit_unit.message, value=1)
- session2.message_flow(destination="dummy", unit=session2.credit_unit.byte, value=0xFFFFFFFF)
+ session2.message_flow(destination="dummy", unit=session2.credit_unit.byte, value=0xFFFFFFFFL)
msg = session2.incoming("dummy").get(timeout=1)
session2.message_accept(RangedSet(msg.id))
session2.message_cancel(destination="dummy")
@@ -736,7 +736,7 @@ class DtxTests(TestBase010):
#consume from src:
session.message_subscribe(destination="temp-swap", queue=src)
session.message_flow(destination="temp-swap", unit=session.credit_unit.message, value=1)
- session.message_flow(destination="temp-swap", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="temp-swap", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
msg = session.incoming("temp-swap").get(timeout=1)
session.message_cancel(destination="temp-swap")
session.message_accept(RangedSet(msg.id))
@@ -753,7 +753,7 @@ class DtxTests(TestBase010):
def assertMessageId(self, expected, queue):
self.session.message_subscribe(queue=queue, destination="results")
self.session.message_flow(destination="results", unit=self.session.credit_unit.message, value=1)
- self.session.message_flow(destination="results", unit=self.session.credit_unit.byte, value=0xFFFFFFFF)
+ self.session.message_flow(destination="results", unit=self.session.credit_unit.byte, value=0xFFFFFFFFL)
self.assertEqual(expected, self.getMessageProperty(self.session.incoming("results").get(timeout=1), 'correlation_id'))
self.session.message_cancel(destination="results")
diff --git a/python/tests_0-10/example.py b/python/tests_0-10/example.py
index 83d208192b..e36907d501 100644
--- a/python/tests_0-10/example.py
+++ b/python/tests_0-10/example.py
@@ -69,8 +69,8 @@ class ExampleTest (TestBase010):
# field that is filled if the reply includes content. In this case the
# interesting field is the consumer_tag.
session.message_subscribe(queue="test-queue", destination="consumer_tag")
- session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
# We can use the session.incoming(...) method to access the messages
# delivered for our consumer_tag.
diff --git a/python/tests_0-10/exchange.py b/python/tests_0-10/exchange.py
index 4b5dc78143..8d9713076d 100644
--- a/python/tests_0-10/exchange.py
+++ b/python/tests_0-10/exchange.py
@@ -108,8 +108,8 @@ class TestHelper(TestBase010):
else: self.uniqueTag += 1
consumer_tag = "tag" + str(self.uniqueTag)
self.session.message_subscribe(queue=queueName, destination=consumer_tag)
- self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.message, value=0xFFFFFFFF)
- self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.byte, value=0xFFFFFFFF)
+ self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.message, value=0xFFFFFFFFL)
+ self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.byte, value=0xFFFFFFFFL)
return self.session.incoming(consumer_tag)
@@ -269,8 +269,49 @@ class DeclareMethodExchangeFieldReservedRuleTests(TestHelper):
standardised exchanges. The client MUST NOT attempt to create an exchange
starting with "amq.".
-
+ Similarly, exchanges starting with "qpid." are reserved for Qpid
+ implementation-specific system exchanges (such as the management exchange).
+ The client MUST NOT attempt to create an exchange starting with the string
+ "qpid.".
"""
+ def template(self, reservedString, exchangeType):
+ try:
+ self.session.exchange_declare(exchange=reservedString, type=exchangeType)
+ self.fail("Expected not allowed error (530) for exchanges starting with \"" + reservedString + "\".")
+ except SessionException, e:
+ self.assertEquals(e.args[0].error_code, 530)
+ # connection closed, reopen it
+ self.tearDown()
+ self.setUp()
+ try:
+ self.session.exchange_declare(exchange=reservedString + "abc123", type=exchangeType)
+ self.fail("Expected not allowed error (530) for exchanges starting with \"" + reservedString + "\".")
+ except SessionException, e:
+ self.assertEquals(e.args[0].error_code, 530)
+ # connection closed, reopen it
+ self.tearDown()
+ self.setUp()
+ # The following should be legal:
+ self.session.exchange_declare(exchange=reservedString[:-1], type=exchangeType)
+ self.session.exchange_delete(exchange=reservedString[:-1])
+ self.session.exchange_declare(exchange=reservedString[1:], type=exchangeType)
+ self.session.exchange_delete(exchange=reservedString[1:])
+ self.session.exchange_declare(exchange="." + reservedString, type=exchangeType)
+ self.session.exchange_delete(exchange="." + reservedString)
+ self.session.exchange_declare(exchange="abc." + reservedString, type=exchangeType)
+ self.session.exchange_delete(exchange="abc." + reservedString)
+ self.session.exchange_declare(exchange="abc." + reservedString + "def", type=exchangeType)
+ self.session.exchange_delete(exchange="abc." + reservedString + "def")
+
+ def test_amq(self):
+ self.template("amq.", "direct")
+ self.template("amq.", "topic")
+ self.template("amq.", "fanout")
+
+ def test_qpid(self):
+ self.template("qpid.", "direct")
+ self.template("qpid.", "topic")
+ self.template("qpid.", "fanout")
class DeclareMethodTypeFieldTypedRuleTests(TestHelper):
diff --git a/python/tests_0-10/management.py b/python/tests_0-10/management.py
index f1360a1902..9dd03bbda4 100644
--- a/python/tests_0-10/management.py
+++ b/python/tests_0-10/management.py
@@ -20,19 +20,22 @@
from qpid.datatypes import Message, RangedSet
from qpid.testlib import TestBase010
from qpid.management import managementChannel, managementClient
+from threading import Condition
+from time import sleep
+import qmf.console
class ManagementTest (TestBase010):
"""
Tests for the management hooks
"""
- def test_broker_connectivity (self):
+ def test_broker_connectivity_oldAPI (self):
"""
Call the "echo" method on the broker to verify it is alive and talking.
"""
session = self.session
- mc = managementClient (session.spec)
+ mc = managementClient ()
mch = mc.addChannel (session)
mc.syncWaitForStable (mch)
@@ -52,40 +55,62 @@ class ManagementTest (TestBase010):
self.assertEqual (res.body, body)
mc.removeChannel (mch)
- def test_system_object (self):
+ def test_methods_sync (self):
+ """
+ Call the "echo" method on the broker to verify it is alive and talking.
+ """
session = self.session
+ self.startQmf()
- mc = managementClient (session.spec)
- mch = mc.addChannel (session)
+ brokers = self.qmf.getObjects(_class="broker")
+ self.assertEqual(len(brokers), 1)
+ broker = brokers[0]
- mc.syncWaitForStable (mch)
- systems = mc.syncGetObjects (mch, "system")
- self.assertEqual (len (systems), 1)
- mc.removeChannel (mch)
+ body = "Echo Message Body"
+ for seq in range(1, 20):
+ res = broker.echo(seq, body)
+ self.assertEqual(res.status, 0)
+ self.assertEqual(res.text, "OK")
+ self.assertEqual(res.sequence, seq)
+ self.assertEqual(res.body, body)
+
+ def test_get_objects(self):
+ self.startQmf()
+
+ # get the package list, verify that the qpid broker package is there
+ packages = self.qmf.getPackages()
+ assert 'org.apache.qpid.broker' in packages
+
+ # get the schema class keys for the broker, verify the broker table and link-down event
+ keys = self.qmf.getClasses('org.apache.qpid.broker')
+ broker = None
+ linkDown = None
+ for key in keys:
+ if key.getClassName() == "broker": broker = key
+ if key.getClassName() == "brokerLinkDown" : linkDown = key
+ assert broker
+ assert linkDown
+
+ brokerObjs = self.qmf.getObjects(_class="broker")
+ assert len(brokerObjs) == 1
+ brokerObjs = self.qmf.getObjects(_key=broker)
+ assert len(brokerObjs) == 1
def test_self_session_id (self):
- session = self.session
-
- mc = managementClient (session.spec)
- mch = mc.addChannel (session)
+ self.startQmf()
+ sessionId = self.qmf_broker.getSessionId()
+ brokerSessions = self.qmf.getObjects(_class="session")
- info = mc.syncWaitForStable (mch)
- brokerSessions = mc.syncGetObjects (mch, "session")
found = False
for bs in brokerSessions:
- if bs.name == info.sessionId:
+ if bs.name == sessionId:
found = True
self.assertEqual (found, True)
- mc.removeChannel (mch)
def test_standard_exchanges (self):
- session = self.session
-
- mc = managementClient (session.spec)
- mch = mc.addChannel (session)
+ self.startQmf()
- mc.syncWaitForStable (mch)
- exchanges = mc.syncGetObjects (mch, "exchange")
+ exchanges = self.qmf.getObjects(_class="exchange")
exchange = self.findExchange (exchanges, "")
self.assertEqual (exchange.type, "direct")
exchange = self.findExchange (exchanges, "amq.direct")
@@ -98,10 +123,276 @@ class ManagementTest (TestBase010):
self.assertEqual (exchange.type, "headers")
exchange = self.findExchange (exchanges, "qpid.management")
self.assertEqual (exchange.type, "topic")
- mc.removeChannel (mch)
def findExchange (self, exchanges, name):
for exchange in exchanges:
if exchange.name == name:
return exchange
return None
+
+ def test_move_queued_messages(self):
+ """
+ Test ability to move messages from the head of one queue to another.
+ Need to test moving all and N messages.
+ """
+ self.startQmf()
+ session = self.session
+ "Set up source queue"
+ session.queue_declare(queue="src-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="src-queue", exchange="amq.direct", binding_key="routing_key")
+
+ twenty = range(1,21)
+ props = session.delivery_properties(routing_key="routing_key")
+ for count in twenty:
+ body = "Move Message %d" % count
+ src_msg = Message(props, body)
+ session.message_transfer(destination="amq.direct", message=src_msg)
+
+ "Set up destination queue"
+ session.queue_declare(queue="dest-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="dest-queue", exchange="amq.direct")
+
+ queues = self.qmf.getObjects(_class="queue")
+
+ "Move 10 messages from src-queue to dest-queue"
+ result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "dest-queue", 10)
+ self.assertEqual (result.status, 0)
+
+ sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+ dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+ self.assertEqual (sq.msgDepth,10)
+ self.assertEqual (dq.msgDepth,10)
+
+ "Move all remaining messages to destination"
+ result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "dest-queue", 0)
+ self.assertEqual (result.status,0)
+
+ sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+ dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+ self.assertEqual (sq.msgDepth,0)
+ self.assertEqual (dq.msgDepth,20)
+
+ "Use a bad source queue name"
+ result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("bad-src-queue", "dest-queue", 0)
+ self.assertEqual (result.status,4)
+
+ "Use a bad destination queue name"
+ result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "bad-dest-queue", 0)
+ self.assertEqual (result.status,4)
+
+ " Use a large qty (40) to move from dest-queue back to "
+ " src-queue- should move all "
+ result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("dest-queue", "src-queue", 40)
+ self.assertEqual (result.status,0)
+
+ sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+ dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+ self.assertEqual (sq.msgDepth,20)
+ self.assertEqual (dq.msgDepth,0)
+
+ "Consume the messages of the queue and check they are all there in order"
+ session.message_subscribe(queue="src-queue", destination="tag")
+ session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ queue = session.incoming("tag")
+ for count in twenty:
+ consumed_msg = queue.get(timeout=1)
+ body = "Move Message %d" % count
+ self.assertEqual(body, consumed_msg.body)
+
+ def test_purge_queue(self):
+ """
+ Test ability to purge messages from the head of a queue.
+ Need to test purging all, 1 (top message) and N messages.
+ """
+ self.startQmf()
+ session = self.session
+ "Set up purge queue"
+ session.queue_declare(queue="purge-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="purge-queue", exchange="amq.direct", binding_key="routing_key")
+
+ twenty = range(1,21)
+ props = session.delivery_properties(routing_key="routing_key")
+ for count in twenty:
+ body = "Purge Message %d" % count
+ msg = Message(props, body)
+ session.message_transfer(destination="amq.direct", message=msg)
+
+ pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+
+ "Purge top message from purge-queue"
+ result = pq.purge(1)
+ self.assertEqual (result.status, 0)
+ pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+ self.assertEqual (pq.msgDepth,19)
+
+ "Purge top 9 messages from purge-queue"
+ result = pq.purge(9)
+ self.assertEqual (result.status, 0)
+ pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+ self.assertEqual (pq.msgDepth,10)
+
+ "Purge all messages from purge-queue"
+ result = pq.purge(0)
+ self.assertEqual (result.status, 0)
+ pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+ self.assertEqual (pq.msgDepth,0)
+
+ def test_methods_async (self):
+ """
+ """
+ class Handler (qmf.console.Console):
+ def __init__(self):
+ self.cv = Condition()
+ self.xmtList = {}
+ self.rcvList = {}
+
+ def methodResponse(self, broker, seq, response):
+ self.cv.acquire()
+ try:
+ self.rcvList[seq] = response
+ finally:
+ self.cv.release()
+
+ def request(self, broker, count):
+ self.count = count
+ for idx in range(count):
+ self.cv.acquire()
+ try:
+ seq = broker.echo(idx, "Echo Message", _async = True)
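+ # _async=True returns a sequence id immediately rather than waiting for the
+ # result; the id recorded below is matched against methodResponse callbacks in check()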
+ self.xmtList[seq] = idx
+ finally:
+ self.cv.release()
+
+ def check(self):
+ if self.count != len(self.xmtList):
+ return "fail (attempted send=%d, actual sent=%d)" % (self.count, len(self.xmtList))
+ lost = 0
+ mismatched = 0
+ for seq in self.xmtList:
+ value = self.xmtList[seq]
+ if seq in self.rcvList:
+ result = self.rcvList.pop(seq)
+ if result.sequence != value:
+ mismatched += 1
+ else:
+ lost += 1
+ spurious = len(self.rcvList)
+ if lost == 0 and mismatched == 0 and spurious == 0:
+ return "pass"
+ else:
+ return "fail (lost=%d, mismatch=%d, spurious=%d)" % (lost, mismatched, spurious)
+
+ handler = Handler()
+ self.startQmf(handler)
+ brokers = self.qmf.getObjects(_class="broker")
+ self.assertEqual(len(brokers), 1)
+ broker = brokers[0]
+ handler.request(broker, 20)
+ sleep(1)
+ self.assertEqual(handler.check(), "pass")
+
+ def test_connection_close(self):
+ """
+ Test management method for closing connection
+ """
+ self.startQmf()
+ conn = self.connect()
+ session = conn.session("my-named-session")
+
+ #using qmf find named session and close the corresponding connection:
+ qmf_ssn_object = self.qmf.getObjects(_class="session", name="my-named-session")[0]
+ qmf_ssn_object._connectionRef_.close()
+
+ #check that connection is closed
+ try:
+ conn.session("another-session")
+ self.fail("Expected failure from closed connection")
+ except: None
+
+ #make sure that the named session has been closed and the name can be re-used
+ conn = self.connect()
+ session = conn.session("my-named-session")
+ session.queue_declare(queue="whatever", exclusive=True, auto_delete=True)
+
+ def test_binding_count_on_queue(self):
+ self.startQmf()
+ conn = self.connect()
+ session = self.session
+
+ QUEUE = "binding_test_queue"
+ EX_DIR = "binding_test_exchange_direct"
+ EX_FAN = "binding_test_exchange_fanout"
+ EX_TOPIC = "binding_test_exchange_topic"
+ EX_HDR = "binding_test_exchange_headers"
+
+ #
+ # Create a test queue
+ #
+ session.queue_declare(queue=QUEUE, exclusive=True, auto_delete=True)
+ queue = self.qmf.getObjects(_class="queue", name=QUEUE)[0]
+ if not queue:
+ self.fail("Queue not found")
+ self.assertEqual(queue.bindingCount, 1, "wrong initial binding count")
+
+ #
+ # Create an exchange of each supported type
+ #
+ session.exchange_declare(exchange=EX_DIR, type="direct")
+ session.exchange_declare(exchange=EX_FAN, type="fanout")
+ session.exchange_declare(exchange=EX_TOPIC, type="topic")
+ session.exchange_declare(exchange=EX_HDR, type="headers")
+
+ #
+ # Bind each exchange to the test queue
+ #
+ match = {}
+ match['x-match'] = "all"
+ match['key'] = "value"
+ session.exchange_bind(exchange=EX_DIR, queue=QUEUE, binding_key="key1")
+ session.exchange_bind(exchange=EX_DIR, queue=QUEUE, binding_key="key2")
+ session.exchange_bind(exchange=EX_FAN, queue=QUEUE)
+ session.exchange_bind(exchange=EX_TOPIC, queue=QUEUE, binding_key="key1.#")
+ session.exchange_bind(exchange=EX_TOPIC, queue=QUEUE, binding_key="key2.#")
+ session.exchange_bind(exchange=EX_HDR, queue=QUEUE, binding_key="key1", arguments=match)
+ match['key2'] = "value2"
+ session.exchange_bind(exchange=EX_HDR, queue=QUEUE, binding_key="key2", arguments=match)
+
+ #
+ # Verify that the queue's binding count accounts for the new bindings
+ #
+ queue.update()
+ self.assertEqual(queue.bindingCount, 8,
+ "added bindings not accounted for (expected 8, got %d)" % queue.bindingCount)
+
+ #
+ # Remove some of the bindings
+ #
+ session.exchange_unbind(exchange=EX_DIR, queue=QUEUE, binding_key="key2")
+ session.exchange_unbind(exchange=EX_TOPIC, queue=QUEUE, binding_key="key2.#")
+ session.exchange_unbind(exchange=EX_HDR, queue=QUEUE, binding_key="key2")
+
+ #
+ # Verify that the queue's binding count accounts for the deleted bindings
+ #
+ queue.update()
+ self.assertEqual(queue.bindingCount, 5,
+ "deleted bindings not accounted for (expected 5, got %d)" % queue.bindingCount)
+ #
+ # Delete the exchanges
+ #
+ session.exchange_delete(exchange=EX_DIR)
+ session.exchange_delete(exchange=EX_FAN)
+ session.exchange_delete(exchange=EX_TOPIC)
+ session.exchange_delete(exchange=EX_HDR)
+
+ #
+ # Verify that the queue's binding count accounts for the lost bindings
+ #
+ queue.update()
+ self.assertEqual(queue.bindingCount, 1,
+ "deleted bindings not accounted for (expected 1, got %d)" % queue.bindingCount)
+
diff --git a/python/tests_0-10/message.py b/python/tests_0-10/message.py
index e4dc5566bd..e80333a1e6 100644
--- a/python/tests_0-10/message.py
+++ b/python/tests_0-10/message.py
@@ -23,7 +23,7 @@ from qpid.datatypes import Message, RangedSet
from qpid.session import SessionException
from qpid.content import Content
-
+from time import sleep
class MessageTests(TestBase010):
"""Tests for 'methods' on the amqp message 'class'"""
@@ -230,8 +230,8 @@ class MessageTests(TestBase010):
session.message_subscribe(destination="my-consumer", queue="test-queue-4")
myqueue = session.incoming("my-consumer")
- session.message_flow(destination="my-consumer", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="my-consumer", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="my-consumer", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="my-consumer", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
#should flush here
@@ -258,8 +258,8 @@ class MessageTests(TestBase010):
session.queue_declare(queue="test-ack-queue", auto_delete=True)
session.message_subscribe(queue = "test-ack-queue", destination = "consumer")
- session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
queue = session.incoming("consumer")
delivery_properties = session.delivery_properties(routing_key="test-ack-queue")
@@ -289,8 +289,8 @@ class MessageTests(TestBase010):
session.close(timeout=10)
session = self.session
- session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
queue = session.incoming("checker")
msg3b = queue.get(timeout=1)
@@ -311,16 +311,16 @@ class MessageTests(TestBase010):
session.exchange_bind(queue = "r", exchange = "amq.fanout")
session.message_subscribe(queue = "q", destination = "consumer")
- session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "blah, blah"))
msg = session.incoming("consumer").get(timeout = 1)
self.assertEquals(msg.body, "blah, blah")
session.message_reject(RangedSet(msg.id))
session.message_subscribe(queue = "r", destination = "checker")
- session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
msg = session.incoming("checker").get(timeout = 1)
self.assertEquals(msg.body, "blah, blah")
@@ -341,7 +341,7 @@ class MessageTests(TestBase010):
#set message credit to finite amount (less than enough for all messages)
session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
#set infinite byte credit
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
#check that expected number were received
q = session.incoming("c")
for i in range(1, 6):
@@ -369,12 +369,12 @@ class MessageTests(TestBase010):
session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "abcdefgh"))
#each message is currently interpreted as requiring msg_size bytes of credit
- msg_size = 21
+ msg_size = 19
#set byte credit to finite amount (less than enough for all messages)
session.message_flow(unit = session.credit_unit.byte, value = msg_size*5, destination = "c")
#set infinite message credit
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "c")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "c")
#check that expected number were received
q = session.incoming("c")
for i in range(5):
@@ -405,7 +405,7 @@ class MessageTests(TestBase010):
#set message credit to finite amount (less than enough for all messages)
session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
#set infinite byte credit
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
#check that expected number were received
q = session.incoming("c")
for i in range(1, 6):
@@ -443,7 +443,7 @@ class MessageTests(TestBase010):
#set byte credit to finite amount (less than enough for all messages)
session.message_flow(unit = session.credit_unit.byte, value = msg_size*5, destination = "c")
#set infinite message credit
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "c")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "c")
#check that expected number were received
q = session.incoming("c")
msgs = []
@@ -462,6 +462,42 @@ class MessageTests(TestBase010):
self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
self.assertEmpty(q)
+ def test_window_flush_ack_flow(self):
+ """
+ Test window based flow control with an explicit flush and accept
+ """
+ #declare an exclusive queue
+ ssn = self.session
+ ssn.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ #create consumer
+ ssn.message_subscribe(queue = "q", destination = "c",
+ accept_mode=ssn.accept_mode.explicit)
+ ssn.message_set_flow_mode(flow_mode = ssn.flow_mode.window, destination = "c")
+
+ #send message A
+ ssn.message_transfer(message=Message(ssn.delivery_properties(routing_key="q"), "A"))
+
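+ #grant effectively unlimited credit in every unit (message and byte) so that message A is delivered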
+ for unit in ssn.credit_unit.VALUES:
+ ssn.message_flow("c", unit, 0xFFFFFFFFL)
+
+ q = ssn.incoming("c")
+ msgA = q.get(timeout=10)
+
+ ssn.message_flush(destination="c")
+
+ # XXX
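+ # manually mark the received transfer command as completed and notify the
+ # broker before accepting (same pattern as the complete() helper in tx.py,
+ # which notes this may eventually be done automatically)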
+ ssn.receiver._completed.add(msgA.id)
+ ssn.channel.session_completed(ssn.receiver._completed)
+ ssn.message_accept(RangedSet(msgA.id))
+
+ for unit in ssn.credit_unit.VALUES:
+ ssn.message_flow("c", unit, 0xFFFFFFFFL)
+
+ #send message B
+ ssn.message_transfer(message=Message(ssn.delivery_properties(routing_key="q"), "B"))
+
+ msgB = q.get(timeout=10)
+
def test_subscribe_not_acquired(self):
"""
Test the not-acquired modes works as expected for a simple case
@@ -472,11 +508,11 @@ class MessageTests(TestBase010):
session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %s" % i))
session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1)
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "a")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
session.message_subscribe(queue = "q", destination = "b", acquire_mode = 1)
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "b")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "b")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
for i in range(6, 11):
session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %s" % i))
@@ -508,8 +544,8 @@ class MessageTests(TestBase010):
session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1, accept_mode = 1)
session.message_set_flow_mode(flow_mode = session.flow_mode.credit, destination = "a")
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "a")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
msg = session.incoming("a").get(timeout = 1)
self.assertEquals("acquire me", msg.body)
#message should still be on the queue:
@@ -532,8 +568,8 @@ class MessageTests(TestBase010):
session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "acquire me"))
session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1)
- session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
msg = session.incoming("a").get(timeout = 1)
self.assertEquals("acquire me", msg.body)
#message should still be on the queue:
@@ -558,8 +594,8 @@ class MessageTests(TestBase010):
session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "release me"))
session.message_subscribe(queue = "q", destination = "a")
- session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
msg = session.incoming("a").get(timeout = 1)
self.assertEquals("release me", msg.body)
session.message_cancel(destination = "a")
@@ -579,7 +615,7 @@ class MessageTests(TestBase010):
session.message_subscribe(queue = "q", destination = "a")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
queue = session.incoming("a")
first = queue.get(timeout = 1)
for i in range(2, 10):
@@ -612,7 +648,7 @@ class MessageTests(TestBase010):
session.message_subscribe(queue = "q", destination = "a")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
queue = session.incoming("a")
ids = []
for i in range (1, 11):
@@ -637,8 +673,8 @@ class MessageTests(TestBase010):
session.close(timeout=10)
session = self.session
- session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
queue = session.incoming("checker")
self.assertEquals("message 4", queue.get(timeout = 1).body)
@@ -656,7 +692,7 @@ class MessageTests(TestBase010):
session.message_subscribe(queue = "q", destination = "a")
session.message_set_flow_mode(flow_mode = 0, destination = "a")
session.message_flow(unit = session.credit_unit.message, value = 5, destination = "a")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
queue = session.incoming("a")
for i in range(1, 6):
@@ -671,7 +707,7 @@ class MessageTests(TestBase010):
#now create a not-acquired subscriber
session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
#check it gets those not consumed
queue = session.incoming("b")
@@ -699,7 +735,7 @@ class MessageTests(TestBase010):
#create a not-acquired subscriber
session.message_subscribe(queue = "q", destination = "a", acquire_mode=1)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
#browse through messages
@@ -721,7 +757,7 @@ class MessageTests(TestBase010):
#create a second not-acquired subscriber
session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
#check it gets those not consumed
queue = session.incoming("b")
@@ -748,12 +784,12 @@ class MessageTests(TestBase010):
#create two 'browsers'
session.message_subscribe(queue = "q", destination = "a", acquire_mode=1)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
queueA = session.incoming("a")
session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "b")
queueB = session.incoming("b")
@@ -770,7 +806,7 @@ class MessageTests(TestBase010):
#create consumer
session.message_subscribe(queue = "q", destination = "c")
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "c")
queueC = session.incoming("c")
#consume the message then ack it
@@ -779,6 +815,41 @@ class MessageTests(TestBase010):
#ensure there are no other messages
self.assertEmpty(queueC)
+ def test_release_order(self):
+ session = self.session
+
+ #create queue
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ #send messages
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "message-%d" % (i)))
+
+ #subscribe:
+ session.message_subscribe(queue="q", destination="a")
+ a = session.incoming("a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+
+ for i in range(1, 11):
+ msg = a.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ if (i % 2):
+ #accept all odd messages
+ session.message_accept(RangedSet(msg.id))
+ else:
+ #release all even messages
+ session.message_release(RangedSet(msg.id))
+
+ #browse:
+ session.message_subscribe(queue="q", destination="b", acquire_mode=1)
+ b = session.incoming("b")
+ b.start()
+ for i in [2, 4, 6, 8, 10]:
+ msg = b.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+
+
def test_empty_body(self):
session = self.session
session.queue_declare(queue="xyz", exclusive=True, auto_delete=True)
@@ -787,8 +858,8 @@ class MessageTests(TestBase010):
consumer_tag = "tag1"
session.message_subscribe(queue="xyz", destination=consumer_tag)
- session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = consumer_tag)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
queue = session.incoming(consumer_tag)
msg = queue.get(timeout=1)
self.assertEquals("", msg.body)
@@ -810,6 +881,28 @@ class MessageTests(TestBase010):
msg = messages.get()
assert msg.body == "test"
+ def test_ttl(self):
+ q = "test_ttl"
+ session = self.session
+
+ session.queue_declare(queue=q, exclusive=True, auto_delete=True)
+
+ dp = session.delivery_properties(routing_key=q, ttl=500)#expire in half a second
+ session.message_transfer(message=Message(dp, "first"))
+
+ dp = session.delivery_properties(routing_key=q, ttl=300000)#expire in five minutes
+ session.message_transfer(message=Message(dp, "second"))
+
+ d = "msgs"
+ session.message_subscribe(queue=q, destination=d)
+ messages = session.incoming(d)
+ sleep(1)
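+ #the 500ms ttl of the first message should have expired during the sleep,
+ #so only the second message is expected to be delivered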
+ session.message_flow(unit = session.credit_unit.message, value=2, destination=d)
+ session.message_flow(unit = session.credit_unit.byte, value=0xFFFFFFFFL, destination=d)
+ assert messages.get(timeout=1).body == "second"
+ self.assertEmpty(messages)
+
+
def assertDataEquals(self, session, msg, expected):
self.assertEquals(expected, msg.body)
diff --git a/python/tests_0-10/persistence.py b/python/tests_0-10/persistence.py
index 815ad1f3dc..e9cf9b7caa 100644
--- a/python/tests_0-10/persistence.py
+++ b/python/tests_0-10/persistence.py
@@ -17,7 +17,8 @@
# under the License.
#
from qpid.datatypes import Message, RangedSet
-from qpid.testlib import testrunner, TestBase010
+#from qpid.testlib import testrunner, TestBase010
+from qpid.testlib import TestBase010
class PersistenceTests(TestBase010):
def test_delete_queue_after_publish(self):
@@ -49,7 +50,7 @@ class PersistenceTests(TestBase010):
#create consumer
session.message_subscribe(queue = "q", destination = "a", accept_mode = 1, acquire_mode=0)
- session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
queue = session.incoming("a")
diff --git a/python/tests_0-10/query.py b/python/tests_0-10/query.py
index 311df84096..d57e964982 100644
--- a/python/tests_0-10/query.py
+++ b/python/tests_0-10/query.py
@@ -133,8 +133,20 @@ class QueryTests(TestBase010):
#test exchange not found
self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)
- #test queue not found
- self.assertEqual(True, session.exchange_bound(exchange=exchange_name, queue="unknown-queue").queue_not_found)
+ #test exchange found, queue not found
+ response = session.exchange_bound(exchange=exchange_name, queue="unknown-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(True, response.queue_not_found)
+
+ #test exchange not found, queue found
+ response = session.exchange_bound(exchange="unknown-exchange", queue="used-queue")
+ self.assertEqual(True, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+
+ #test exchange not found, queue not found
+ response = session.exchange_bound(exchange="unknown-exchange", queue="unknown-queue")
+ self.assertEqual(True, response.exchange_not_found)
+ self.assertEqual(True, response.queue_not_found)
def test_exchange_bound_fanout(self):
diff --git a/python/tests_0-10/queue.py b/python/tests_0-10/queue.py
index a3b23a1c32..eb38965190 100644
--- a/python/tests_0-10/queue.py
+++ b/python/tests_0-10/queue.py
@@ -49,8 +49,8 @@ class QueueTests(TestBase010):
#send a further message and consume it, ensuring that the other messages are really gone
session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "four"))
session.message_subscribe(queue="test-queue", destination="tag")
- session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
queue = session.incoming("tag")
msg = queue.get(timeout=1)
self.assertEqual("four", msg.body)
@@ -88,7 +88,7 @@ class QueueTests(TestBase010):
# TestBase.setUp has already opened session(1)
s1 = self.session
# Here we open a second separate session on the same connection:
- s2 = self.conn.session("other", 2)
+ s2 = self.conn.session("other")
#declare an exclusive queue:
s1.queue_declare(queue="exclusive-queue", exclusive=True, auto_delete=True)
@@ -98,6 +98,22 @@ class QueueTests(TestBase010):
self.fail("Expected second exclusive queue_declare to raise a channel exception")
except SessionException, e:
self.assertEquals(405, e.args[0].error_code)
+
+ s3 = self.conn.session("subscriber")
+ try:
+ #another session should not be allowed to subscribe to this:
+ s3.message_subscribe(queue="exclusive-queue")
+ self.fail("Expected message_subscribe on an exclusive queue to raise a channel exception")
+ except SessionException, e:
+ self.assertEquals(405, e.args[0].error_code)
+
+ s4 = self.conn.session("deleter")
+ try:
+ #another session should not be allowed to delete this:
+ s4.queue_delete(queue="exclusive-queue")
+ self.fail("Expected queue_delete on an exclusive queue to raise a channel exception")
+ except SessionException, e:
+ self.assertEquals(405, e.args[0].error_code)
def test_declare_passive(self):
@@ -166,11 +182,11 @@ class QueueTests(TestBase010):
session.queue_declare(queue="queue-2", exclusive=True, auto_delete=True)
session.message_subscribe(queue="queue-1", destination="queue-1")
- session.message_flow(destination="queue-1", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="queue-1", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="queue-1", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="queue-1", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
session.message_subscribe(queue="queue-2", destination="queue-2")
- session.message_flow(destination="queue-2", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="queue-2", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="queue-2", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="queue-2", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
queue1 = session.incoming("queue-1")
queue2 = session.incoming("queue-2")
@@ -267,8 +283,8 @@ class QueueTests(TestBase010):
#empty queue:
session.message_subscribe(destination="consumer_tag", queue="delete-me-2")
- session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
queue = session.incoming("consumer_tag")
msg = queue.get(timeout=1)
self.assertEqual("message", msg.body)
diff --git a/python/tests_0-10/tx.py b/python/tests_0-10/tx.py
index da162d54ec..8cdc539a08 100644
--- a/python/tests_0-10/tx.py
+++ b/python/tests_0-10/tx.py
@@ -19,7 +19,7 @@
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.datatypes import Message, RangedSet
-from qpid.testlib import testrunner, TestBase010
+from qpid.testlib import TestBase010
class TxTests(TestBase010):
"""
@@ -251,13 +251,13 @@ class TxTests(TestBase010):
session = session or self.session
consumer_tag = keys["destination"]
session.message_subscribe(**keys)
- session.message_flow(destination=consumer_tag, unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination=consumer_tag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination=consumer_tag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=consumer_tag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
def enable_flow(self, tag, session=None):
session = session or self.session
- session.message_flow(destination=tag, unit=session.credit_unit.message, value=0xFFFFFFFF)
- session.message_flow(destination=tag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_flow(destination=tag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=tag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
def complete(self, session, msg):
session.receiver._completed.add(msg.id)#TODO: this may be done automatically
diff --git a/python/tests_0-8/__init__.py b/python/tests_0-8/__init__.py
index 9a09d2d04f..526f2452f8 100644
--- a/python/tests_0-8/__init__.py
+++ b/python/tests_0-8/__init__.py
@@ -18,3 +18,5 @@
# specific language governing permissions and limitations
# under the License.
#
+
+import basic, broker, example, exchange, queue, testlib, tx
diff --git a/python/tests_0-8/basic.py b/python/tests_0-8/basic.py
index 95ca0d7287..d5837fc19c 100644
--- a/python/tests_0-8/basic.py
+++ b/python/tests_0-8/basic.py
@@ -19,7 +19,7 @@
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class BasicTests(TestBase):
"""Tests for 'methods' on the amqp basic 'class'"""
@@ -219,10 +219,11 @@ class BasicTests(TestBase):
channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
channel.basic_cancel(consumer_tag=subscription.consumer_tag)
- subscription2 = channel.basic_consume(queue="test-requeue")
- queue2 = self.client.queue(subscription2.consumer_tag)
channel.basic_recover(requeue=True)
+
+ subscription2 = channel.basic_consume(queue="test-requeue")
+ queue2 = self.client.queue(subscription2.consumer_tag)
msg3b = queue2.get(timeout=1)
msg5b = queue2.get(timeout=1)
diff --git a/python/tests_0-8/broker.py b/python/tests_0-8/broker.py
index d9ac69c5e3..7f3fe7530e 100644
--- a/python/tests_0-8/broker.py
+++ b/python/tests_0-8/broker.py
@@ -19,15 +19,15 @@
from qpid.client import Closed
from qpid.queue import Empty
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class BrokerTests(TestBase):
"""Tests for basic Broker functionality"""
- def test_amqp_basic_13(self):
+ def test_ack_and_no_ack(self):
"""
First, this test tries to receive a message with a no-ack
- consumer. Second, this test tries to explicitely receive and
+ consumer. Second, this test tries to explicitly receive and
acknowledge a message with an acknowledging consumer.
"""
ch = self.channel
@@ -40,7 +40,7 @@ class BrokerTests(TestBase):
msg = self.client.queue(ctag).get(timeout = 5)
self.assert_(msg.content.body == body)
- # Acknowleding consumer
+ # Acknowledging consumer
self.queue_declare(ch, queue = "otherqueue")
ctag = ch.basic_consume(queue = "otherqueue", no_ack = False).consumer_tag
body = "test ack"
@@ -102,3 +102,19 @@ class BrokerTests(TestBase):
except Closed, e:
self.assertConnectionException(504, e.args[0])
+ def test_channel_flow(self):
+ channel = self.channel
+ channel.queue_declare(queue="flow_test_queue", exclusive=True)
+ ctag = channel.basic_consume(queue="flow_test_queue", no_ack=True).consumer_tag
+ incoming = self.client.queue(ctag)
+
+ channel.channel_flow(active=False)
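+ #with flow paused the broker should withhold deliveries on this channel; the
+ #publish below must not reach the consumer until flow is re-enabled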
+ channel.basic_publish(routing_key="flow_test_queue", content=Content("abcdefghijklmnopqrstuvwxyz"))
+ try:
+ incoming.get(timeout=1)
+ self.fail("Received message when flow turned off.")
+ except Empty: None
+
+ channel.channel_flow(active=True)
+ msg = incoming.get(timeout=1)
+ self.assertEqual("abcdefghijklmnopqrstuvwxyz", msg.content.body)
diff --git a/python/tests_0-8/example.py b/python/tests_0-8/example.py
index a1949ccb9f..d82bad1f61 100644
--- a/python/tests_0-8/example.py
+++ b/python/tests_0-8/example.py
@@ -18,7 +18,7 @@
#
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class ExampleTest (TestBase):
"""
diff --git a/python/tests_0-8/queue.py b/python/tests_0-8/queue.py
index 60ac4c3dfb..b7a41736ab 100644
--- a/python/tests_0-8/queue.py
+++ b/python/tests_0-8/queue.py
@@ -19,7 +19,7 @@
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class QueueTests(TestBase):
"""Tests for 'methods' on the amqp queue 'class'"""
diff --git a/python/tests_0-8/testlib.py b/python/tests_0-8/testlib.py
index cab07cc4ac..76f7e964a2 100644
--- a/python/tests_0-8/testlib.py
+++ b/python/tests_0-8/testlib.py
@@ -22,7 +22,7 @@
#
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
from Queue import Empty
import sys
diff --git a/python/tests_0-8/tx.py b/python/tests_0-8/tx.py
index 054fb8d8b7..9faddb1110 100644
--- a/python/tests_0-8/tx.py
+++ b/python/tests_0-8/tx.py
@@ -19,7 +19,7 @@
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class TxTests(TestBase):
"""
diff --git a/python/tests_0-9/__init__.py b/python/tests_0-9/__init__.py
index 9a09d2d04f..d9f2ed7dbb 100644
--- a/python/tests_0-9/__init__.py
+++ b/python/tests_0-9/__init__.py
@@ -18,3 +18,5 @@
# specific language governing permissions and limitations
# under the License.
#
+
+import query, queue
diff --git a/python/tests_0-9/basic.py b/python/tests_0-9/basic.py
deleted file mode 100644
index 607ba26343..0000000000
--- a/python/tests_0-9/basic.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from qpid.client import Client, Closed
-from qpid.queue import Empty
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-
-class BasicTests(TestBase):
- """Tests for 'methods' on the amqp basic 'class'"""
-
- def test_consume_no_local(self):
- """
- Test that the no_local flag is honoured in the consume method
- """
- channel = self.channel
- #setup, declare two queues:
- channel.queue_declare(queue="test-queue-1a", exclusive=True)
- channel.queue_declare(queue="test-queue-1b", exclusive=True)
- #establish two consumers one of which excludes delivery of locally sent messages
- channel.basic_consume(consumer_tag="local_included", queue="test-queue-1a")
- channel.basic_consume(consumer_tag="local_excluded", queue="test-queue-1b", no_local=True)
-
- #send a message
- channel.basic_publish(routing_key="test-queue-1a", content=Content("consume_no_local"))
- channel.basic_publish(routing_key="test-queue-1b", content=Content("consume_no_local"))
-
- #check the queues of the two consumers
- excluded = self.client.queue("local_excluded")
- included = self.client.queue("local_included")
- msg = included.get(timeout=1)
- self.assertEqual("consume_no_local", msg.content.body)
- try:
- excluded.get(timeout=1)
- self.fail("Received locally published message though no_local=true")
- except Empty: None
-
-
- def test_consume_exclusive(self):
- """
- Test that the exclusive flag is honoured in the consume method
- """
- channel = self.channel
- #setup, declare a queue:
- channel.queue_declare(queue="test-queue-2", exclusive=True)
-
- #check that an exclusive consumer prevents other consumer being created:
- channel.basic_consume(consumer_tag="first", queue="test-queue-2", exclusive=True)
- try:
- channel.basic_consume(consumer_tag="second", queue="test-queue-2")
- self.fail("Expected consume request to fail due to previous exclusive consumer")
- except Closed, e:
- self.assertChannelException(403, e.args[0])
-
- #open new channel and cleanup last consumer:
- channel = self.client.channel(2)
- channel.channel_open()
-
- #check that an exclusive consumer cannot be created if a consumer already exists:
- channel.basic_consume(consumer_tag="first", queue="test-queue-2")
- try:
- channel.basic_consume(consumer_tag="second", queue="test-queue-2", exclusive=True)
- self.fail("Expected exclusive consume request to fail due to previous consumer")
- except Closed, e:
- self.assertChannelException(403, e.args[0])
-
- def test_consume_queue_errors(self):
- """
- Test error conditions associated with the queue field of the consume method:
- """
- channel = self.channel
- try:
- #queue specified but doesn't exist:
- channel.basic_consume(queue="invalid-queue")
- self.fail("Expected failure when consuming from non-existent queue")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
- channel = self.client.channel(2)
- channel.channel_open()
- try:
- #queue not specified and none previously declared for channel:
- channel.basic_consume(queue="")
- self.fail("Expected failure when consuming from unspecified queue")
- except Closed, e:
- self.assertConnectionException(530, e.args[0])
-
- def test_consume_unique_consumers(self):
- """
- Ensure unique consumer tags are enforced
- """
- channel = self.channel
- #setup, declare a queue:
- channel.queue_declare(queue="test-queue-3", exclusive=True)
-
- #check that attempts to use duplicate tags are detected and prevented:
- channel.basic_consume(consumer_tag="first", queue="test-queue-3")
- try:
- channel.basic_consume(consumer_tag="first", queue="test-queue-3")
- self.fail("Expected consume request to fail due to non-unique tag")
- except Closed, e:
- self.assertConnectionException(530, e.args[0])
-
- def test_cancel(self):
- """
- Test compliance of the basic.cancel method
- """
- channel = self.channel
- #setup, declare a queue:
- channel.queue_declare(queue="test-queue-4", exclusive=True)
- channel.basic_consume(consumer_tag="my-consumer", queue="test-queue-4")
- channel.basic_publish(routing_key="test-queue-4", content=Content("One"))
-
- myqueue = self.client.queue("my-consumer")
- msg = myqueue.get(timeout=1)
- self.assertEqual("One", msg.content.body)
-
- #cancel should stop messages being delivered
- channel.basic_cancel(consumer_tag="my-consumer")
- channel.basic_publish(routing_key="test-queue-4", content=Content("Two"))
- try:
- msg = myqueue.get(timeout=1)
- self.fail("Got message after cancellation: " + msg)
- except Empty: None
-
- #cancellation of non-existant consumers should be handled without error
- channel.basic_cancel(consumer_tag="my-consumer")
- channel.basic_cancel(consumer_tag="this-never-existed")
-
-
- def test_ack(self):
- """
- Test basic ack/recover behaviour
- """
- channel = self.channel
- channel.queue_declare(queue="test-ack-queue", exclusive=True)
-
- reply = channel.basic_consume(queue="test-ack-queue", no_ack=False)
- queue = self.client.queue(reply.consumer_tag)
-
- channel.basic_publish(routing_key="test-ack-queue", content=Content("One"))
- channel.basic_publish(routing_key="test-ack-queue", content=Content("Two"))
- channel.basic_publish(routing_key="test-ack-queue", content=Content("Three"))
- channel.basic_publish(routing_key="test-ack-queue", content=Content("Four"))
- channel.basic_publish(routing_key="test-ack-queue", content=Content("Five"))
-
- msg1 = queue.get(timeout=1)
- msg2 = queue.get(timeout=1)
- msg3 = queue.get(timeout=1)
- msg4 = queue.get(timeout=1)
- msg5 = queue.get(timeout=1)
-
- self.assertEqual("One", msg1.content.body)
- self.assertEqual("Two", msg2.content.body)
- self.assertEqual("Three", msg3.content.body)
- self.assertEqual("Four", msg4.content.body)
- self.assertEqual("Five", msg5.content.body)
-
- channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
- channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
-
- channel.basic_recover(requeue=False)
-
- msg3b = queue.get(timeout=1)
- msg5b = queue.get(timeout=1)
-
- self.assertEqual("Three", msg3b.content.body)
- self.assertEqual("Five", msg5b.content.body)
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected message: " + extra.content.body)
- except Empty: None
-
- def test_recover_requeue(self):
- """
-        Test requeueing on recovery
- """
- channel = self.channel
- channel.queue_declare(queue="test-requeue", exclusive=True)
-
- subscription = channel.basic_consume(queue="test-requeue", no_ack=False)
- queue = self.client.queue(subscription.consumer_tag)
-
- channel.basic_publish(routing_key="test-requeue", content=Content("One"))
- channel.basic_publish(routing_key="test-requeue", content=Content("Two"))
- channel.basic_publish(routing_key="test-requeue", content=Content("Three"))
- channel.basic_publish(routing_key="test-requeue", content=Content("Four"))
- channel.basic_publish(routing_key="test-requeue", content=Content("Five"))
-
- msg1 = queue.get(timeout=1)
- msg2 = queue.get(timeout=1)
- msg3 = queue.get(timeout=1)
- msg4 = queue.get(timeout=1)
- msg5 = queue.get(timeout=1)
-
- self.assertEqual("One", msg1.content.body)
- self.assertEqual("Two", msg2.content.body)
- self.assertEqual("Three", msg3.content.body)
- self.assertEqual("Four", msg4.content.body)
- self.assertEqual("Five", msg5.content.body)
-
- channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
- channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
-
- channel.basic_cancel(consumer_tag=subscription.consumer_tag)
-
- channel.basic_recover(requeue=True)
-
- subscription2 = channel.basic_consume(queue="test-requeue")
- queue2 = self.client.queue(subscription2.consumer_tag)
-
- msg3b = queue2.get(timeout=1)
- msg5b = queue2.get(timeout=1)
-
- self.assertEqual("Three", msg3b.content.body)
- self.assertEqual("Five", msg5b.content.body)
-
- self.assertEqual(True, msg3b.redelivered)
- self.assertEqual(True, msg5b.redelivered)
-
- try:
- extra = queue2.get(timeout=1)
- self.fail("Got unexpected message in second queue: " + extra.content.body)
- except Empty: None
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected message in original queue: " + extra.content.body)
- except Empty: None
-
-
- def test_qos_prefetch_count(self):
- """
- Test that the prefetch count specified is honoured
- """
- #setup: declare queue and subscribe
- channel = self.channel
- channel.queue_declare(queue="test-prefetch-count", exclusive=True)
- subscription = channel.basic_consume(queue="test-prefetch-count", no_ack=False)
- queue = self.client.queue(subscription.consumer_tag)
-
- #set prefetch to 5:
- channel.basic_qos(prefetch_count=5)
-
- #publish 10 messages:
- for i in range(1, 11):
- channel.basic_publish(routing_key="test-prefetch-count", content=Content("Message %d" % i))
-
- #only 5 messages should have been delivered:
- for i in range(1, 6):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.content.body)
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
- except Empty: None
-
- #ack messages and check that the next set arrive ok:
- channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
-
- for i in range(6, 11):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.content.body)
-
- channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
- except Empty: None
-
-
-
- def test_qos_prefetch_size(self):
- """
- Test that the prefetch size specified is honoured
- """
- #setup: declare queue and subscribe
- channel = self.channel
- channel.queue_declare(queue="test-prefetch-size", exclusive=True)
- subscription = channel.basic_consume(queue="test-prefetch-size", no_ack=False)
- queue = self.client.queue(subscription.consumer_tag)
-
- #set prefetch to 50 bytes (each message is 9 or 10 bytes):
- channel.basic_qos(prefetch_size=50)
-
- #publish 10 messages:
- for i in range(1, 11):
- channel.basic_publish(routing_key="test-prefetch-size", content=Content("Message %d" % i))
-
-        #only 5 messages should have been delivered (messages 1-5 are 9 bytes each, 45 bytes in total; a sixth would exceed the 50 byte window):
- for i in range(1, 6):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.content.body)
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
- except Empty: None
-
- #ack messages and check that the next set arrive ok:
- channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
-
- for i in range(6, 11):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.content.body)
-
- channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
- except Empty: None
-
- #make sure that a single oversized message still gets delivered
- large = "abcdefghijklmnopqrstuvwxyz"
- large = large + "-" + large;
- channel.basic_publish(routing_key="test-prefetch-size", content=Content(large))
- msg = queue.get(timeout=1)
- self.assertEqual(large, msg.content.body)
-
- def test_get(self):
- """
- Test basic_get method
- """
- channel = self.channel
- channel.queue_declare(queue="test-get", exclusive=True)
-
- #publish some messages (no_ack=True)
- for i in range(1, 11):
- channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
-
- #use basic_get to read back the messages, and check that we get an empty at the end
- for i in range(1, 11):
- reply = channel.basic_get(no_ack=True)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_ok")
- self.assertEqual("Message %d" % i, reply.content.body)
-
- reply = channel.basic_get(no_ack=True)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_empty")
-
- #repeat for no_ack=False
- for i in range(11, 21):
- channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
-
- for i in range(11, 21):
- reply = channel.basic_get(no_ack=False)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_ok")
- self.assertEqual("Message %d" % i, reply.content.body)
- if(i == 13):
- channel.basic_ack(delivery_tag=reply.delivery_tag, multiple=True)
- if(i in [15, 17, 19]):
- channel.basic_ack(delivery_tag=reply.delivery_tag)
-
- reply = channel.basic_get(no_ack=True)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_empty")
-
- #recover(requeue=True)
- channel.basic_recover(requeue=True)
-
- #get the unacked messages again (14, 16, 18, 20)
- for i in [14, 16, 18, 20]:
- reply = channel.basic_get(no_ack=False)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_ok")
- self.assertEqual("Message %d" % i, reply.content.body)
- channel.basic_ack(delivery_tag=reply.delivery_tag)
-
- reply = channel.basic_get(no_ack=True)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_empty")
-
- channel.basic_recover(requeue=True)
-
- reply = channel.basic_get(no_ack=True)
- self.assertEqual(reply.method.klass.name, "basic")
- self.assertEqual(reply.method.name, "get_empty")
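
The cumulative-ack behaviour exercised by test_ack and test_get above is worth spelling out: basic_ack with multiple=True acknowledges every outstanding delivery up to and including the given delivery_tag, while multiple=False acknowledges only that one message. A minimal sketch, assuming the same TestBase fixtures (self.channel, self.client) and the 0-9 client calls used in the deleted tests; the queue name is illustrative:

    from qpid.content import Content

    def sketch_cumulative_ack(self):
        channel = self.channel
        channel.queue_declare(queue="ack-sketch", exclusive=True)
        reply = channel.basic_consume(queue="ack-sketch", no_ack=False)
        queue = self.client.queue(reply.consumer_tag)

        for body in ["A", "B", "C"]:
            channel.basic_publish(routing_key="ack-sketch", content=Content(body))

        a, b, c = [queue.get(timeout=1) for _ in range(3)]

        #multiple=True acknowledges A and B in a single call...
        channel.basic_ack(delivery_tag=b.delivery_tag, multiple=True)
        #...so after a recover only C should be redelivered
        channel.basic_recover(requeue=False)
        assert queue.get(timeout=1).content.body == "C"
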
diff --git a/python/tests_0-9/broker.py b/python/tests_0-9/broker.py
deleted file mode 100644
index 03b4132d3e..0000000000
--- a/python/tests_0-9/broker.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from qpid.client import Closed
-from qpid.queue import Empty
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-
-class BrokerTests(TestBase):
- """Tests for basic Broker functionality"""
-
- def test_ack_and_no_ack(self):
- """
- First, this test tries to receive a message with a no-ack
- consumer. Second, this test tries to explicitly receive and
- acknowledge a message with an acknowledging consumer.
- """
- ch = self.channel
- self.queue_declare(ch, queue = "myqueue")
-
- # No ack consumer
- ctag = "tag1"
- ch.message_consume(queue = "myqueue", destination = ctag, no_ack = True)
- body = "test no-ack"
- ch.message_transfer(routing_key = "myqueue", body = body)
- msg = self.client.queue(ctag).get(timeout = 5)
- self.assert_(msg.body == body)
-
- # Acknowledging consumer
- self.queue_declare(ch, queue = "otherqueue")
- ctag = "tag2"
- ch.message_consume(queue = "otherqueue", destination = ctag, no_ack = False)
- body = "test ack"
- ch.message_transfer(routing_key = "otherqueue", body = body)
- msg = self.client.queue(ctag).get(timeout = 5)
- msg.ok()
- self.assert_(msg.body == body)
-
- def test_simple_delivery_immediate(self):
- """
- Test simple message delivery where consume is issued before publish
- """
- channel = self.channel
- self.exchange_declare(channel, exchange="test-exchange", type="direct")
- self.queue_declare(channel, queue="test-queue")
- channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
- consumer_tag = "tag1"
- channel.message_consume(queue="test-queue", destination=consumer_tag, no_ack=True)
- queue = self.client.queue(consumer_tag)
-
- body = "Immediate Delivery"
- channel.message_transfer(destination="test-exchange", routing_key="key", body=body, immediate=True)
- msg = queue.get(timeout=5)
- self.assert_(msg.body == body)
-
- # TODO: Ensure we fail if immediate=True and there's no consumer.
-
-
- def test_simple_delivery_queued(self):
- """
- Test basic message delivery where publish is issued before consume
- (i.e. requires queueing of the message)
- """
- channel = self.channel
- self.exchange_declare(channel, exchange="test-exchange", type="direct")
- self.queue_declare(channel, queue="test-queue")
- channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
- body = "Queued Delivery"
- channel.message_transfer(destination="test-exchange", routing_key="key", body=body)
-
- consumer_tag = "tag1"
- channel.message_consume(queue="test-queue", destination=consumer_tag, no_ack=True)
- queue = self.client.queue(consumer_tag)
- msg = queue.get(timeout=5)
- self.assert_(msg.body == body)
-
- def test_invalid_channel(self):
- channel = self.client.channel(200)
- try:
- channel.queue_declare(exclusive=True)
- self.fail("Expected error on queue_declare for invalid channel")
- except Closed, e:
- self.assertConnectionException(504, e.args[0])
-
- def test_closed_channel(self):
- channel = self.client.channel(200)
- channel.channel_open()
- channel.channel_close()
- try:
- channel.queue_declare(exclusive=True)
- self.fail("Expected error on queue_declare for closed channel")
- except Closed, e:
- if isinstance(e.args[0], str): self.fail(e)
- self.assertConnectionException(504, e.args[0])
-
- def test_ping_pong(self):
- channel = self.channel
- reply = channel.channel_ping()
- self.assertEqual(reply.method.klass.name, "channel")
- self.assertEqual(reply.method.name, "ok")
- #todo: provide a way to get notified of incoming pongs...
-
- def test_channel_flow(self):
- channel = self.channel
- channel.queue_declare(queue="flow_test_queue", exclusive=True)
- channel.message_consume(destination="my-tag", queue="flow_test_queue")
- incoming = self.client.queue("my-tag")
-
- channel.channel_flow(active=False)
- channel.message_transfer(routing_key="flow_test_queue", body="abcdefghijklmnopqrstuvwxyz")
- try:
- incoming.get(timeout=1)
- self.fail("Received message when flow turned off.")
- except Empty: None
-
- channel.channel_flow(active=True)
- msg = incoming.get(timeout=1)
- self.assertEqual("abcdefghijklmnopqrstuvwxyz", msg.body)
diff --git a/python/tests_0-9/dtx.py b/python/tests_0-9/dtx.py
deleted file mode 100644
index bc268f4129..0000000000
--- a/python/tests_0-9/dtx.py
+++ /dev/null
@@ -1,587 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from qpid.client import Client, Closed
-from qpid.queue import Empty
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-from struct import pack, unpack
-from time import sleep
-
-class DtxTests(TestBase):
- """
- Tests for the amqp dtx related classes.
-
- Tests of the form test_simple_xxx test the basic transactional
- behaviour. The approach here is to 'swap' a message from one queue
- to another by consuming and re-publishing in the same
- transaction. That transaction is then completed in different ways
- and the appropriate result verified.
-
- The other tests enforce more specific rules and behaviour on a
- per-method or per-field basis.
- """
-
- XA_RBROLLBACK = 1
- XA_RBTIMEOUT = 2
- XA_OK = 8
-
- def test_simple_commit(self):
- """
- Test basic one-phase commit behaviour.
- """
- channel = self.channel
- tx = self.xid("my-xid")
- self.txswap(tx, "commit")
-
- #neither queue should have any messages accessible
- self.assertMessageCount(0, "queue-a")
- self.assertMessageCount(0, "queue-b")
-
- #commit
- self.assertEqual(self.XA_OK, channel.dtx_coordination_commit(xid=tx, one_phase=True).flags)
-
- #check result
- self.assertMessageCount(0, "queue-a")
- self.assertMessageCount(1, "queue-b")
- self.assertMessageId("commit", "queue-b")
-
- def test_simple_prepare_commit(self):
- """
- Test basic two-phase commit behaviour.
- """
- channel = self.channel
- tx = self.xid("my-xid")
- self.txswap(tx, "prepare-commit")
-
- #prepare
- self.assertEqual(self.XA_OK, channel.dtx_coordination_prepare(xid=tx).flags)
-
- #neither queue should have any messages accessible
- self.assertMessageCount(0, "queue-a")
- self.assertMessageCount(0, "queue-b")
-
- #commit
- self.assertEqual(self.XA_OK, channel.dtx_coordination_commit(xid=tx, one_phase=False).flags)
-
- #check result
- self.assertMessageCount(0, "queue-a")
- self.assertMessageCount(1, "queue-b")
- self.assertMessageId("prepare-commit", "queue-b")
-
-
- def test_simple_rollback(self):
- """
- Test basic rollback behaviour.
- """
- channel = self.channel
- tx = self.xid("my-xid")
- self.txswap(tx, "rollback")
-
- #neither queue should have any messages accessible
- self.assertMessageCount(0, "queue-a")
- self.assertMessageCount(0, "queue-b")
-
- #rollback
- self.assertEqual(self.XA_OK, channel.dtx_coordination_rollback(xid=tx).flags)
-
- #check result
- self.assertMessageCount(1, "queue-a")
- self.assertMessageCount(0, "queue-b")
- self.assertMessageId("rollback", "queue-a")
-
- def test_simple_prepare_rollback(self):
- """
- Test basic rollback behaviour after the transaction has been prepared.
- """
- channel = self.channel
- tx = self.xid("my-xid")
- self.txswap(tx, "prepare-rollback")
-
- #prepare
- self.assertEqual(self.XA_OK, channel.dtx_coordination_prepare(xid=tx).flags)
-
- #neither queue should have any messages accessible
- self.assertMessageCount(0, "queue-a")
- self.assertMessageCount(0, "queue-b")
-
- #rollback
- self.assertEqual(self.XA_OK, channel.dtx_coordination_rollback(xid=tx).flags)
-
- #check result
- self.assertMessageCount(1, "queue-a")
- self.assertMessageCount(0, "queue-b")
- self.assertMessageId("prepare-rollback", "queue-a")
-
- def test_select_required(self):
- """
- check that an error is flagged if select is not issued before
- start or end
- """
- channel = self.channel
- tx = self.xid("dummy")
- try:
- channel.dtx_demarcation_start(xid=tx)
-
- #if we get here we have failed, but need to do some cleanup:
- channel.dtx_demarcation_end(xid=tx)
- channel.dtx_coordination_rollback(xid=tx)
- self.fail("Channel not selected for use with dtx, expected exception!")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def test_start_already_known(self):
- """
- Verify that an attempt to start an association with a
- transaction that is already known is not allowed (unless the
- join flag is set).
- """
- #create two channels on different connection & select them for use with dtx:
- channel1 = self.channel
- channel1.dtx_demarcation_select()
-
- other = self.connect()
- channel2 = other.channel(1)
- channel2.channel_open()
- channel2.dtx_demarcation_select()
-
- #create a xid
- tx = self.xid("dummy")
- #start work on one channel under that xid:
- channel1.dtx_demarcation_start(xid=tx)
- #then start on the other without the join set
- failed = False
- try:
- channel2.dtx_demarcation_start(xid=tx)
- except Closed, e:
- failed = True
- error = e
-
- #cleanup:
- if not failed:
- channel2.dtx_demarcation_end(xid=tx)
- other.close()
- channel1.dtx_demarcation_end(xid=tx)
- channel1.dtx_coordination_rollback(xid=tx)
-
- #verification:
-        if failed: self.assertConnectionException(503, error.args[0])
- else: self.fail("Xid already known, expected exception!")
-
- def test_forget_xid_on_completion(self):
- """
- Verify that a xid is 'forgotten' - and can therefore be used
- again - once it is completed.
- """
- channel = self.channel
- #do some transactional work & complete the transaction
- self.test_simple_commit()
-
- #start association for the same xid as the previously completed txn
- tx = self.xid("my-xid")
- channel.dtx_demarcation_start(xid=tx)
- channel.dtx_demarcation_end(xid=tx)
- channel.dtx_coordination_rollback(xid=tx)
-
- def test_start_join_and_resume(self):
- """
- Ensure the correct error is signalled when both the join and
- resume flags are set on starting an association between a
-        channel and a transaction.
- """
- channel = self.channel
- channel.dtx_demarcation_select()
- tx = self.xid("dummy")
- try:
- channel.dtx_demarcation_start(xid=tx, join=True, resume=True)
- #failed, but need some cleanup:
- channel.dtx_demarcation_end(xid=tx)
- channel.dtx_coordination_rollback(xid=tx)
- self.fail("Join and resume both set, expected exception!")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def test_start_join(self):
- """
- Verify 'join' behaviour, where a channel is associated with a
- transaction that is already associated with another channel.
- """
- #create two channels & select them for use with dtx:
- channel1 = self.channel
- channel1.dtx_demarcation_select()
-
- channel2 = self.client.channel(2)
- channel2.channel_open()
- channel2.dtx_demarcation_select()
-
- #setup
- channel1.queue_declare(queue="one", exclusive=True)
- channel1.queue_declare(queue="two", exclusive=True)
- channel1.message_transfer(routing_key="one", message_id="a", body="DtxMessage")
- channel1.message_transfer(routing_key="two", message_id="b", body="DtxMessage")
-
- #create a xid
- tx = self.xid("dummy")
- #start work on one channel under that xid:
- channel1.dtx_demarcation_start(xid=tx)
- #then start on the other with the join flag set
- channel2.dtx_demarcation_start(xid=tx, join=True)
-
- #do work through each channel
- self.swap(channel1, "one", "two")#swap 'a' from 'one' to 'two'
- self.swap(channel2, "two", "one")#swap 'b' from 'two' to 'one'
-
- #mark end on both channels
- channel1.dtx_demarcation_end(xid=tx)
- channel2.dtx_demarcation_end(xid=tx)
-
- #commit and check
- channel1.dtx_coordination_commit(xid=tx, one_phase=True)
- self.assertMessageCount(1, "one")
- self.assertMessageCount(1, "two")
- self.assertMessageId("a", "two")
- self.assertMessageId("b", "one")
-
-
- def test_suspend_resume(self):
- """
- Test suspension and resumption of an association
- """
- channel = self.channel
- channel.dtx_demarcation_select()
-
- #setup
- channel.queue_declare(queue="one", exclusive=True)
- channel.queue_declare(queue="two", exclusive=True)
- channel.message_transfer(routing_key="one", message_id="a", body="DtxMessage")
- channel.message_transfer(routing_key="two", message_id="b", body="DtxMessage")
-
- tx = self.xid("dummy")
-
- channel.dtx_demarcation_start(xid=tx)
- self.swap(channel, "one", "two")#swap 'a' from 'one' to 'two'
- channel.dtx_demarcation_end(xid=tx, suspend=True)
-
- channel.dtx_demarcation_start(xid=tx, resume=True)
- self.swap(channel, "two", "one")#swap 'b' from 'two' to 'one'
- channel.dtx_demarcation_end(xid=tx)
-
- #commit and check
- channel.dtx_coordination_commit(xid=tx, one_phase=True)
- self.assertMessageCount(1, "one")
- self.assertMessageCount(1, "two")
- self.assertMessageId("a", "two")
- self.assertMessageId("b", "one")
-
- def test_end_suspend_and_fail(self):
- """
- Verify that the correct error is signalled if the suspend and
-        fail flags are both set when disassociating a transaction from
- the channel
- """
- channel = self.channel
- channel.dtx_demarcation_select()
- tx = self.xid("suspend_and_fail")
- channel.dtx_demarcation_start(xid=tx)
- try:
- channel.dtx_demarcation_end(xid=tx, suspend=True, fail=True)
- self.fail("Suspend and fail both set, expected exception!")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- #cleanup
- other = self.connect()
- channel = other.channel(1)
- channel.channel_open()
- channel.dtx_coordination_rollback(xid=tx)
- channel.channel_close()
- other.close()
-
-
- def test_end_unknown_xid(self):
- """
- Verifies that the correct exception is thrown when an attempt
- is made to end the association for a xid not previously
- associated with the channel
- """
- channel = self.channel
- channel.dtx_demarcation_select()
- tx = self.xid("unknown-xid")
- try:
- channel.dtx_demarcation_end(xid=tx)
- self.fail("Attempted to end association with unknown xid, expected exception!")
- except Closed, e:
- #FYI: this is currently *not* the exception specified, but I think the spec is wrong! Confirming...
- self.assertConnectionException(503, e.args[0])
-
- def test_end(self):
- """
- Verify that the association is terminated by end and subsequent
- operations are non-transactional
- """
- channel = self.client.channel(2)
- channel.channel_open()
- channel.queue_declare(queue="tx-queue", exclusive=True)
-
- #publish a message under a transaction
- channel.dtx_demarcation_select()
- tx = self.xid("dummy")
- channel.dtx_demarcation_start(xid=tx)
- channel.message_transfer(routing_key="tx-queue", message_id="one", body="DtxMessage")
- channel.dtx_demarcation_end(xid=tx)
-
- #now that association with txn is ended, publish another message
- channel.message_transfer(routing_key="tx-queue", message_id="two", body="DtxMessage")
-
- #check the second message is available, but not the first
- self.assertMessageCount(1, "tx-queue")
- channel.message_consume(queue="tx-queue", destination="results", no_ack=False)
- msg = self.client.queue("results").get(timeout=1)
- self.assertEqual("two", msg.message_id)
- channel.message_cancel(destination="results")
- #ack the message then close the channel
- msg.ok()
- channel.channel_close()
-
- channel = self.channel
- #commit the transaction and check that the first message (and
- #only the first message) is then delivered
- channel.dtx_coordination_commit(xid=tx, one_phase=True)
- self.assertMessageCount(1, "tx-queue")
- self.assertMessageId("one", "tx-queue")
-
- def test_invalid_commit_one_phase_true(self):
- """
- Test that a commit with one_phase = True is rejected if the
- transaction in question has already been prepared.
- """
- other = self.connect()
- tester = other.channel(1)
- tester.channel_open()
- tester.queue_declare(queue="dummy", exclusive=True)
- tester.dtx_demarcation_select()
- tx = self.xid("dummy")
- tester.dtx_demarcation_start(xid=tx)
- tester.message_transfer(routing_key="dummy", body="whatever")
- tester.dtx_demarcation_end(xid=tx)
- tester.dtx_coordination_prepare(xid=tx)
- failed = False
- try:
- tester.dtx_coordination_commit(xid=tx, one_phase=True)
- except Closed, e:
- failed = True
- error = e
-
- if failed:
- self.channel.dtx_coordination_rollback(xid=tx)
-            self.assertConnectionException(503, error.args[0])
- else:
- tester.channel_close()
- other.close()
- self.fail("Invalid use of one_phase=True, expected exception!")
-
- def test_invalid_commit_one_phase_false(self):
- """
- Test that a commit with one_phase = False is rejected if the
- transaction in question has not yet been prepared.
- """
- other = self.connect()
- tester = other.channel(1)
- tester.channel_open()
- tester.queue_declare(queue="dummy", exclusive=True)
- tester.dtx_demarcation_select()
- tx = self.xid("dummy")
- tester.dtx_demarcation_start(xid=tx)
- tester.message_transfer(routing_key="dummy", body="whatever")
- tester.dtx_demarcation_end(xid=tx)
- failed = False
- try:
- tester.dtx_coordination_commit(xid=tx, one_phase=False)
- except Closed, e:
- failed = True
- error = e
-
- if failed:
- self.channel.dtx_coordination_rollback(xid=tx)
-            self.assertConnectionException(503, error.args[0])
- else:
- tester.channel_close()
- other.close()
- self.fail("Invalid use of one_phase=False, expected exception!")
-
- def test_implicit_end(self):
- """
- Test that an association is implicitly ended when the channel
- is closed (whether by exception or explicit client request)
- and the transaction in question is marked as rollback only.
- """
- channel1 = self.channel
- channel2 = self.client.channel(2)
- channel2.channel_open()
-
- #setup:
- channel2.queue_declare(queue="dummy", exclusive=True)
- channel2.message_transfer(routing_key="dummy", body="whatever")
- tx = self.xid("dummy")
-
- channel2.dtx_demarcation_select()
- channel2.dtx_demarcation_start(xid=tx)
- channel2.message_get(queue="dummy", destination="dummy")
- self.client.queue("dummy").get(timeout=1).ok()
- channel2.message_transfer(routing_key="dummy", body="whatever")
- channel2.channel_close()
-
- self.assertEqual(self.XA_RBROLLBACK, channel1.dtx_coordination_prepare(xid=tx).flags)
- channel1.dtx_coordination_rollback(xid=tx)
-
- def test_get_timeout(self):
- """
- Check that get-timeout returns the correct value, (and that a
- transaction with a timeout can complete normally)
- """
- channel = self.channel
- tx = self.xid("dummy")
-
- channel.dtx_demarcation_select()
- channel.dtx_demarcation_start(xid=tx)
- self.assertEqual(0, channel.dtx_coordination_get_timeout(xid=tx).timeout)
- channel.dtx_coordination_set_timeout(xid=tx, timeout=60)
- self.assertEqual(60, channel.dtx_coordination_get_timeout(xid=tx).timeout)
- self.assertEqual(self.XA_OK, channel.dtx_demarcation_end(xid=tx).flags)
- self.assertEqual(self.XA_OK, channel.dtx_coordination_rollback(xid=tx).flags)
-
- def test_set_timeout(self):
- """
- Test the timeout of a transaction results in the expected
- behaviour
- """
-        #open new channel to allow self.channel to be used in checking the queue
- channel = self.client.channel(2)
- channel.channel_open()
- #setup:
- tx = self.xid("dummy")
- channel.queue_declare(queue="queue-a", exclusive=True)
- channel.queue_declare(queue="queue-b", exclusive=True)
- channel.message_transfer(routing_key="queue-a", message_id="timeout", body="DtxMessage")
-
- channel.dtx_demarcation_select()
- channel.dtx_demarcation_start(xid=tx)
- self.swap(channel, "queue-a", "queue-b")
- channel.dtx_coordination_set_timeout(xid=tx, timeout=2)
- sleep(3)
- #check that the work has been rolled back already
- self.assertMessageCount(1, "queue-a")
- self.assertMessageCount(0, "queue-b")
- self.assertMessageId("timeout", "queue-a")
- #check the correct codes are returned when we try to complete the txn
- self.assertEqual(self.XA_RBTIMEOUT, channel.dtx_demarcation_end(xid=tx).flags)
- self.assertEqual(self.XA_RBTIMEOUT, channel.dtx_coordination_rollback(xid=tx).flags)
-
-
-
- def test_recover(self):
- """
- Test basic recover behaviour
- """
- channel = self.channel
-
- channel.dtx_demarcation_select()
- channel.queue_declare(queue="dummy", exclusive=True)
-
- prepared = []
- for i in range(1, 10):
- tx = self.xid("tx%s" % (i))
- channel.dtx_demarcation_start(xid=tx)
- channel.message_transfer(routing_key="dummy", body="message%s" % (i))
- channel.dtx_demarcation_end(xid=tx)
- if i in [2, 5, 6, 8]:
- channel.dtx_coordination_prepare(xid=tx)
- prepared.append(tx)
- else:
- channel.dtx_coordination_rollback(xid=tx)
-
- indoubt = channel.dtx_coordination_recover().xids
- #convert indoubt table to a list of xids (note: this will change for 0-10)
- data = indoubt["xids"]
- xids = []
- pos = 0
- while pos < len(data):
- size = unpack("!B", data[pos])[0]
- start = pos + 1
- end = start + size
- xid = data[start:end]
- xids.append(xid)
- pos = end
-
- #rollback the prepared transactions returned by recover
- for x in xids:
- channel.dtx_coordination_rollback(xid=x)
-
- #validate against the expected list of prepared transactions
- actual = set(xids)
- expected = set(prepared)
- intersection = actual.intersection(expected)
-
- if intersection != expected:
- missing = expected.difference(actual)
- extra = actual.difference(expected)
- for x in missing:
- channel.dtx_coordination_rollback(xid=x)
- self.fail("Recovered xids not as expected. missing: %s; extra: %s" % (missing, extra))
-
- def xid(self, txid, branchqual = ''):
- return pack('LBB', 0, len(txid), len(branchqual)) + txid + branchqual
-
- def txswap(self, tx, id):
- channel = self.channel
- #declare two queues:
- channel.queue_declare(queue="queue-a", exclusive=True)
- channel.queue_declare(queue="queue-b", exclusive=True)
- #put message with specified id on one queue:
- channel.message_transfer(routing_key="queue-a", message_id=id, body="DtxMessage")
-
- #start the transaction:
- channel.dtx_demarcation_select()
- self.assertEqual(self.XA_OK, self.channel.dtx_demarcation_start(xid=tx).flags)
-
- #'swap' the message from one queue to the other, under that transaction:
- self.swap(self.channel, "queue-a", "queue-b")
-
- #mark the end of the transactional work:
- self.assertEqual(self.XA_OK, self.channel.dtx_demarcation_end(xid=tx).flags)
-
- def swap(self, channel, src, dest):
- #consume from src:
- channel.message_get(destination="temp-swap", queue=src)
- msg = self.client.queue("temp-swap").get(timeout=1)
- msg.ok();
-
- #re-publish to dest
- channel.message_transfer(routing_key=dest, message_id=msg.message_id, body=msg.body)
-
- def assertMessageCount(self, expected, queue):
- self.assertEqual(expected, self.channel.queue_declare(queue=queue, passive=True).message_count)
-
- def assertMessageId(self, expected, queue):
- self.channel.message_consume(queue=queue, destination="results", no_ack=True)
- self.assertEqual(expected, self.client.queue("results").get(timeout=1).message_id)
- self.channel.message_cancel(destination="results")
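
The xid helper above packs a minimal XA transaction id: struct.pack('LBB', 0, len(txid), len(branchqual)) writes a format code of zero followed by the lengths of the global transaction id and the branch qualifier, and the two byte strings are appended after that header. A small sketch of the same layout (note that 'L' uses the platform's native unsigned long size, so the header width is platform dependent; the ids shown are illustrative):

    from struct import pack

    def make_xid(txid, branchqual=''):
        #format=0, then gtrid length and bqual length, then the raw id bytes
        return pack('LBB', 0, len(txid), len(branchqual)) + txid + branchqual

    xid1 = make_xid("tx1")          #header + "tx1", empty branch qualifier
    xid2 = make_xid("tx1", "b1")    #header + "tx1" + "b1"
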
diff --git a/python/tests_0-9/example.py b/python/tests_0-9/example.py
deleted file mode 100644
index 7ab4cc7d0a..0000000000
--- a/python/tests_0-9/example.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-
-class ExampleTest (TestBase):
- """
-    An example Qpid test, illustrating the unittest framework and the
-    Python Qpid client. The test class must inherit from TestBase. The
- test code uses the Qpid client to interact with a qpid broker and
- verify it behaves as expected.
- """
-
- def test_example(self):
- """
- An example test. Note that test functions must start with 'test_'
- to be recognized by the test framework.
- """
-
- # By inheriting TestBase, self.client is automatically connected
- # and self.channel is automatically opened as channel(1)
- # Other channel methods mimic the protocol.
- channel = self.channel
-
- # Now we can send regular commands. If you want to see what the method
- # arguments mean or what other commands are available, you can use the
-        # Python built-in help() method. For example:
-        #help(channel)
-        #help(channel.exchange_declare)
-
-        # If you want to browse the available protocol methods without being
- # connected to a live server you can use the amqp-doc utility:
- #
- # Usage amqp-doc [<options>] <spec> [<pattern_1> ... <pattern_n>]
- #
- # Options:
- # -e, --regexp use regex instead of glob when matching
-
- # Now that we know what commands are available we can use them to
- # interact with the server.
-
- # Here we use ordinal arguments.
- self.exchange_declare(channel, 0, "test", "direct")
-
- # Here we use keyword arguments.
- self.queue_declare(channel, queue="test-queue")
- channel.queue_bind(queue="test-queue", exchange="test", routing_key="key")
-
-        # Call Channel.message_consume to register as a consumer.
- # All the protocol methods return a message object. The message object
- # has fields corresponding to the reply method fields, plus a content
- # field that is filled if the reply includes content. In this case the
- # interesting field is the consumer_tag.
- channel.message_consume(queue="test-queue", destination="consumer_tag")
-
- # We can use the Client.queue(...) method to access the queue
- # corresponding to our consumer_tag.
- queue = self.client.queue("consumer_tag")
-
-        # Now let's publish a message and see if our consumer gets it.
- body = "Hello World!"
- channel.message_transfer(destination="test",
- routing_key="key",
- body = body)
-
- # Now we'll wait for the message to arrive. We can use the timeout
- # argument in case the server hangs. By default queue.get() will wait
- # until a message arrives or the connection to the server dies.
- msg = queue.get(timeout=10)
-
- # And check that we got the right response with assertEqual
- self.assertEqual(body, msg.body)
-
- # Now acknowledge the message.
- msg.ok()
-
diff --git a/python/tests_0-9/exchange.py b/python/tests_0-9/exchange.py
deleted file mode 100644
index 3a47ffff8c..0000000000
--- a/python/tests_0-9/exchange.py
+++ /dev/null
@@ -1,327 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-"""
-Tests for exchange behaviour.
-
-Test classes ending in 'RuleTests' are derived from rules in amqp.xml.
-"""
-
-import Queue, logging
-from qpid.testlib import TestBase
-from qpid.content import Content
-from qpid.client import Closed
-
-
-class StandardExchangeVerifier:
- """Verifies standard exchange behavior.
-
- Used as base class for classes that test standard exchanges."""
-
- def verifyDirectExchange(self, ex):
- """Verify that ex behaves like a direct exchange."""
- self.queue_declare(queue="q")
- self.channel.queue_bind(queue="q", exchange=ex, routing_key="k")
- self.assertPublishConsume(exchange=ex, queue="q", routing_key="k")
- try:
- self.assertPublishConsume(exchange=ex, queue="q", routing_key="kk")
- self.fail("Expected Empty exception")
- except Queue.Empty: None # Expected
-
- def verifyFanOutExchange(self, ex):
- """Verify that ex behaves like a fanout exchange."""
- self.queue_declare(queue="q")
- self.channel.queue_bind(queue="q", exchange=ex)
- self.queue_declare(queue="p")
- self.channel.queue_bind(queue="p", exchange=ex)
- for qname in ["q", "p"]: self.assertPublishGet(self.consume(qname), ex)
-
- def verifyTopicExchange(self, ex):
- """Verify that ex behaves like a topic exchange"""
- self.queue_declare(queue="a")
- self.channel.queue_bind(queue="a", exchange=ex, routing_key="a.#.b.*")
- q = self.consume("a")
- self.assertPublishGet(q, ex, "a.b.x")
- self.assertPublishGet(q, ex, "a.x.b.x")
- self.assertPublishGet(q, ex, "a.x.x.b.x")
- # Shouldn't match
- self.channel.message_transfer(destination=ex, routing_key="a.b", body="")
- self.channel.message_transfer(destination=ex, routing_key="a.b.x.y", body="")
- self.channel.message_transfer(destination=ex, routing_key="x.a.b.x", body="")
- self.channel.message_transfer(destination=ex, routing_key="a.b", body="")
- self.assert_(q.empty())
-
- def verifyHeadersExchange(self, ex):
- """Verify that ex is a headers exchange"""
- self.queue_declare(queue="q")
- self.channel.queue_bind(queue="q", exchange=ex, arguments={ "x-match":"all", "name":"fred" , "age":3} )
- q = self.consume("q")
- headers = {"name":"fred", "age":3}
- self.assertPublishGet(q, exchange=ex, properties=headers)
- self.channel.message_transfer(destination=ex, body="") # No headers, won't deliver
- self.assertEmpty(q);
-
-
-class RecommendedTypesRuleTests(TestBase, StandardExchangeVerifier):
- """
- The server SHOULD implement these standard exchange types: topic, headers.
-
- Client attempts to declare an exchange with each of these standard types.
- """
-
- def testDirect(self):
- """Declare and test a direct exchange"""
- self.exchange_declare(0, exchange="d", type="direct")
- self.verifyDirectExchange("d")
-
- def testFanout(self):
- """Declare and test a fanout exchange"""
- self.exchange_declare(0, exchange="f", type="fanout")
- self.verifyFanOutExchange("f")
-
- def testTopic(self):
- """Declare and test a topic exchange"""
- self.exchange_declare(0, exchange="t", type="topic")
- self.verifyTopicExchange("t")
-
- def testHeaders(self):
- """Declare and test a headers exchange"""
- self.exchange_declare(0, exchange="h", type="headers")
- self.verifyHeadersExchange("h")
-
-
-class RequiredInstancesRuleTests(TestBase, StandardExchangeVerifier):
- """
- The server MUST, in each virtual host, pre-declare an exchange instance
- for each standard exchange type that it implements, where the name of the
- exchange instance is amq. followed by the exchange type name.
-
- Client creates a temporary queue and attempts to bind to each required
-    exchange instance (amq.fanout, amq.direct, amq.topic, and amq.match, if
-    those types are defined).
- """
- def testAmqDirect(self): self.verifyDirectExchange("amq.direct")
-
- def testAmqFanOut(self): self.verifyFanOutExchange("amq.fanout")
-
- def testAmqTopic(self): self.verifyTopicExchange("amq.topic")
-
- def testAmqMatch(self): self.verifyHeadersExchange("amq.match")
-
-class DefaultExchangeRuleTests(TestBase, StandardExchangeVerifier):
- """
- The server MUST predeclare a direct exchange to act as the default exchange
- for content Publish methods and for default queue bindings.
-
- Client checks that the default exchange is active by specifying a queue
- binding with no exchange name, and publishing a message with a suitable
- routing key but without specifying the exchange name, then ensuring that
- the message arrives in the queue correctly.
- """
- def testDefaultExchange(self):
- # Test automatic binding by queue name.
- self.queue_declare(queue="d")
- self.assertPublishConsume(queue="d", routing_key="d")
-        # Test explicit bind to the default exchange
- self.verifyDirectExchange("")
-
-
-# TODO aconway 2006-09-27: Fill in empty tests:
-
-class DefaultAccessRuleTests(TestBase):
- """
- The server MUST NOT allow clients to access the default exchange except
- by specifying an empty exchange name in the Queue.Bind and content Publish
- methods.
- """
-
-class ExtensionsRuleTests(TestBase):
- """
- The server MAY implement other exchange types as wanted.
- """
-
-
-class DeclareMethodMinimumRuleTests(TestBase):
- """
- The server SHOULD support a minimum of 16 exchanges per virtual host and
- ideally, impose no limit except as defined by available resources.
-
- The client creates as many exchanges as it can until the server reports
-    an error; the number of exchanges successfully created must be at least
- sixteen.
- """
-
-
-class DeclareMethodTicketFieldValidityRuleTests(TestBase):
- """
- The client MUST provide a valid access ticket giving "active" access to
- the realm in which the exchange exists or will be created, or "passive"
- access if the if-exists flag is set.
-
-    Client creates an access ticket with the wrong access rights and attempts
-    to use it in this method.
- """
-
-
-class DeclareMethodExchangeFieldReservedRuleTests(TestBase):
- """
- Exchange names starting with "amq." are reserved for predeclared and
- standardised exchanges. The client MUST NOT attempt to create an exchange
- starting with "amq.".
-
-
- """
-
-
-class DeclareMethodTypeFieldTypedRuleTests(TestBase):
- """
- Exchanges cannot be redeclared with different types. The client MUST not
- attempt to redeclare an existing exchange with a different type than used
- in the original Exchange.Declare method.
-
-
- """
-
-
-class DeclareMethodTypeFieldSupportRuleTests(TestBase):
- """
- The client MUST NOT attempt to create an exchange with a type that the
- server does not support.
-
-
- """
-
-
-class DeclareMethodPassiveFieldNotFoundRuleTests(TestBase):
- """
- If set, and the exchange does not already exist, the server MUST raise a
- channel exception with reply code 404 (not found).
- """
- def test(self):
- try:
- self.channel.exchange_declare(exchange="humpty_dumpty", passive=True)
- self.fail("Expected 404 for passive declaration of unknown exchange.")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
-
-class DeclareMethodDurableFieldSupportRuleTests(TestBase):
- """
- The server MUST support both durable and transient exchanges.
-
-
- """
-
-
-class DeclareMethodDurableFieldStickyRuleTests(TestBase):
- """
- The server MUST ignore the durable field if the exchange already exists.
-
-
- """
-
-
-class DeclareMethodAutoDeleteFieldStickyRuleTests(TestBase):
- """
- The server MUST ignore the auto-delete field if the exchange already
- exists.
-
-
- """
-
-
-class DeleteMethodTicketFieldValidityRuleTests(TestBase):
- """
- The client MUST provide a valid access ticket giving "active" access
- rights to the exchange's access realm.
-
-    Client creates an access ticket with the wrong access rights and attempts
-    to use it in this method.
- """
-
-
-class DeleteMethodExchangeFieldExistsRuleTests(TestBase):
- """
- The client MUST NOT attempt to delete an exchange that does not exist.
- """
-
-
-class HeadersExchangeTests(TestBase):
- """
- Tests for headers exchange functionality.
- """
- def setUp(self):
- TestBase.setUp(self)
- self.queue_declare(queue="q")
- self.q = self.consume("q")
-
- def myAssertPublishGet(self, headers):
- self.assertPublishGet(self.q, exchange="amq.match", properties=headers)
-
- def myBasicPublish(self, headers):
- self.channel.message_transfer(destination="amq.match", body="foobar", application_headers=headers)
-
- def testMatchAll(self):
- self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'all', "name":"fred", "age":3})
- self.myAssertPublishGet({"name":"fred", "age":3})
- self.myAssertPublishGet({"name":"fred", "age":3, "extra":"ignoreme"})
-
- # None of these should match
- self.myBasicPublish({})
- self.myBasicPublish({"name":"barney"})
- self.myBasicPublish({"name":10})
- self.myBasicPublish({"name":"fred", "age":2})
- self.assertEmpty(self.q)
-
- def testMatchAny(self):
- self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":"fred", "age":3})
- self.myAssertPublishGet({"name":"fred"})
- self.myAssertPublishGet({"name":"fred", "ignoreme":10})
- self.myAssertPublishGet({"ignoreme":10, "age":3})
-
-        # Won't match
- self.myBasicPublish({})
- self.myBasicPublish({"irrelevant":0})
- self.assertEmpty(self.q)
-
-
-class MiscellaneousErrorsTests(TestBase):
- """
- Test some miscellaneous error conditions
- """
- def testTypeNotKnown(self):
- try:
- self.channel.exchange_declare(exchange="test_type_not_known_exchange", type="invalid_type")
- self.fail("Expected 503 for declaration of unknown exchange type.")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def testDifferentDeclaredType(self):
- self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="direct")
- try:
- self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
- self.fail("Expected 530 for redeclaration of exchange with different type.")
- except Closed, e:
- self.assertConnectionException(530, e.args[0])
- #cleanup
- other = self.connect()
- c2 = other.channel(1)
- c2.channel_open()
- c2.exchange_delete(exchange="test_different_declared_type_exchange")
-
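
Several of the 'RuleTests' classes above are still empty (see the TODO). As an illustration only, a sketch of how the reserved-name rule from DeclareMethodExchangeFieldReservedRuleTests might be exercised, reusing the exchange_declare/Closed pattern from MiscellaneousErrorsTests; the exchange name and the expectation that the broker refuses the declaration are assumptions, and the exact reply code is deliberately not asserted:

    from qpid.testlib import TestBase
    from qpid.client import Closed

    class DeclareReservedNameSketch(TestBase):
        def test_amq_prefix_rejected(self):
            try:
                #"amq." names are reserved for the predeclared exchanges
                self.channel.exchange_declare(exchange="amq.not-standard", type="direct")
                self.fail("Expected declaration of an amq.* exchange to be refused")
            except Closed:
                pass #refused as expected; reply code intentionally not checked
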
diff --git a/python/tests_0-9/message.py b/python/tests_0-9/message.py
deleted file mode 100644
index b25016e680..0000000000
--- a/python/tests_0-9/message.py
+++ /dev/null
@@ -1,657 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from qpid.client import Client, Closed
-from qpid.queue import Empty
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-from qpid.reference import Reference, ReferenceId
-
-class MessageTests(TestBase):
- """Tests for 'methods' on the amqp message 'class'"""
-
- def test_consume_no_local(self):
- """
- Test that the no_local flag is honoured in the consume method
- """
- channel = self.channel
- #setup, declare two queues:
- channel.queue_declare(queue="test-queue-1a", exclusive=True)
- channel.queue_declare(queue="test-queue-1b", exclusive=True)
- #establish two consumers one of which excludes delivery of locally sent messages
- channel.message_consume(destination="local_included", queue="test-queue-1a")
- channel.message_consume(destination="local_excluded", queue="test-queue-1b", no_local=True)
-
- #send a message
- channel.message_transfer(routing_key="test-queue-1a", body="consume_no_local")
- channel.message_transfer(routing_key="test-queue-1b", body="consume_no_local")
-
- #check the queues of the two consumers
- excluded = self.client.queue("local_excluded")
- included = self.client.queue("local_included")
- msg = included.get(timeout=1)
- self.assertEqual("consume_no_local", msg.body)
- try:
- excluded.get(timeout=1)
-            self.fail("Received locally published message even though no_local=True")
- except Empty: None
-
-
- def test_consume_exclusive(self):
- """
- Test that the exclusive flag is honoured in the consume method
- """
- channel = self.channel
- #setup, declare a queue:
- channel.queue_declare(queue="test-queue-2", exclusive=True)
-
- #check that an exclusive consumer prevents other consumer being created:
- channel.message_consume(destination="first", queue="test-queue-2", exclusive=True)
- try:
- channel.message_consume(destination="second", queue="test-queue-2")
- self.fail("Expected consume request to fail due to previous exclusive consumer")
- except Closed, e:
- self.assertChannelException(403, e.args[0])
-
- #open new channel and cleanup last consumer:
- channel = self.client.channel(2)
- channel.channel_open()
-
- #check that an exclusive consumer cannot be created if a consumer already exists:
- channel.message_consume(destination="first", queue="test-queue-2")
- try:
- channel.message_consume(destination="second", queue="test-queue-2", exclusive=True)
- self.fail("Expected exclusive consume request to fail due to previous consumer")
- except Closed, e:
- self.assertChannelException(403, e.args[0])
-
- def test_consume_queue_errors(self):
- """
- Test error conditions associated with the queue field of the consume method:
- """
- channel = self.channel
- try:
- #queue specified but doesn't exist:
- channel.message_consume(queue="invalid-queue")
- self.fail("Expected failure when consuming from non-existent queue")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
- channel = self.client.channel(2)
- channel.channel_open()
- try:
- #queue not specified and none previously declared for channel:
- channel.message_consume(queue="")
- self.fail("Expected failure when consuming from unspecified queue")
- except Closed, e:
- self.assertConnectionException(530, e.args[0])
-
- def test_consume_unique_consumers(self):
- """
- Ensure unique consumer tags are enforced
- """
- channel = self.channel
- #setup, declare a queue:
- channel.queue_declare(queue="test-queue-3", exclusive=True)
-
- #check that attempts to use duplicate tags are detected and prevented:
- channel.message_consume(destination="first", queue="test-queue-3")
- try:
- channel.message_consume(destination="first", queue="test-queue-3")
- self.fail("Expected consume request to fail due to non-unique tag")
- except Closed, e:
- self.assertConnectionException(530, e.args[0])
-
- def test_cancel(self):
- """
- Test compliance of the basic.cancel method
- """
- channel = self.channel
- #setup, declare a queue:
- channel.queue_declare(queue="test-queue-4", exclusive=True)
- channel.message_consume(destination="my-consumer", queue="test-queue-4")
- channel.message_transfer(routing_key="test-queue-4", body="One")
-
- #cancel should stop messages being delivered
- channel.message_cancel(destination="my-consumer")
- channel.message_transfer(routing_key="test-queue-4", body="Two")
- myqueue = self.client.queue("my-consumer")
- msg = myqueue.get(timeout=1)
- self.assertEqual("One", msg.body)
- try:
- msg = myqueue.get(timeout=1)
-            self.fail("Got message after cancellation: " + msg.body)
- except Empty: None
-
-        #cancellation of non-existent consumers should be handled without error
- channel.message_cancel(destination="my-consumer")
- channel.message_cancel(destination="this-never-existed")
-
-
- def test_ack(self):
- """
- Test basic ack/recover behaviour
- """
- channel = self.channel
- channel.queue_declare(queue="test-ack-queue", exclusive=True)
-
- channel.message_consume(queue="test-ack-queue", destination="consumer_tag", no_ack=False)
- queue = self.client.queue("consumer_tag")
-
- channel.message_transfer(routing_key="test-ack-queue", body="One")
- channel.message_transfer(routing_key="test-ack-queue", body="Two")
- channel.message_transfer(routing_key="test-ack-queue", body="Three")
- channel.message_transfer(routing_key="test-ack-queue", body="Four")
- channel.message_transfer(routing_key="test-ack-queue", body="Five")
-
- msg1 = queue.get(timeout=1)
- msg2 = queue.get(timeout=1)
- msg3 = queue.get(timeout=1)
- msg4 = queue.get(timeout=1)
- msg5 = queue.get(timeout=1)
-
- self.assertEqual("One", msg1.body)
- self.assertEqual("Two", msg2.body)
- self.assertEqual("Three", msg3.body)
- self.assertEqual("Four", msg4.body)
- self.assertEqual("Five", msg5.body)
-
- msg1.ok(batchoffset=1)#One and Two
- msg4.ok()
-
- channel.message_recover(requeue=False)
-
- msg3b = queue.get(timeout=1)
- msg5b = queue.get(timeout=1)
-
- self.assertEqual("Three", msg3b.body)
- self.assertEqual("Five", msg5b.body)
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected message: " + extra.body)
- except Empty: None
-
- def test_recover_requeue(self):
-        Test requeueing on recovery
- Test requeing on recovery
- """
- channel = self.channel
- channel.queue_declare(queue="test-requeue", exclusive=True)
-
- channel.message_consume(queue="test-requeue", destination="consumer_tag", no_ack=False)
- queue = self.client.queue("consumer_tag")
-
- channel.message_transfer(routing_key="test-requeue", body="One")
- channel.message_transfer(routing_key="test-requeue", body="Two")
- channel.message_transfer(routing_key="test-requeue", body="Three")
- channel.message_transfer(routing_key="test-requeue", body="Four")
- channel.message_transfer(routing_key="test-requeue", body="Five")
-
- msg1 = queue.get(timeout=1)
- msg2 = queue.get(timeout=1)
- msg3 = queue.get(timeout=1)
- msg4 = queue.get(timeout=1)
- msg5 = queue.get(timeout=1)
-
- self.assertEqual("One", msg1.body)
- self.assertEqual("Two", msg2.body)
- self.assertEqual("Three", msg3.body)
- self.assertEqual("Four", msg4.body)
- self.assertEqual("Five", msg5.body)
-
- msg1.ok(batchoffset=1) #One and Two
- msg4.ok() #Four
-
- channel.message_cancel(destination="consumer_tag")
-
- #publish a new message
- channel.message_transfer(routing_key="test-requeue", body="Six")
- #requeue unacked messages (Three and Five)
- channel.message_recover(requeue=True)
-
- channel.message_consume(queue="test-requeue", destination="consumer_tag")
- queue2 = self.client.queue("consumer_tag")
-
- msg3b = queue2.get(timeout=1)
- msg5b = queue2.get(timeout=1)
-
- self.assertEqual("Three", msg3b.body)
- self.assertEqual("Five", msg5b.body)
-
- self.assertEqual(True, msg3b.redelivered)
- self.assertEqual(True, msg5b.redelivered)
-
- self.assertEqual("Six", queue2.get(timeout=1).body)
-
- try:
- extra = queue2.get(timeout=1)
- self.fail("Got unexpected message in second queue: " + extra.body)
- except Empty: None
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected message in original queue: " + extra.body)
- except Empty: None
-
-
- def test_qos_prefetch_count(self):
- """
- Test that the prefetch count specified is honoured
- """
- #setup: declare queue and subscribe
- channel = self.channel
- channel.queue_declare(queue="test-prefetch-count", exclusive=True)
- subscription = channel.message_consume(queue="test-prefetch-count", destination="consumer_tag", no_ack=False)
- queue = self.client.queue("consumer_tag")
-
- #set prefetch to 5:
- channel.message_qos(prefetch_count=5)
-
- #publish 10 messages:
- for i in range(1, 11):
- channel.message_transfer(routing_key="test-prefetch-count", body="Message %d" % i)
-
- #only 5 messages should have been delivered:
- for i in range(1, 6):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 6th message in original queue: " + extra.body)
- except Empty: None
-
- #ack messages and check that the next set arrive ok:
-        #todo: once batching is implemented, send a single response for all messages
- msg.ok(batchoffset=-4)#1-5
-
- for i in range(6, 11):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
-
- msg.ok(batchoffset=-4)#6-10
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 11th message in original queue: " + extra.body)
- except Empty: None
-
-
-
- def test_qos_prefetch_size(self):
- """
- Test that the prefetch size specified is honoured
- """
- #setup: declare queue and subscribe
- channel = self.channel
- channel.queue_declare(queue="test-prefetch-size", exclusive=True)
- subscription = channel.message_consume(queue="test-prefetch-size", destination="consumer_tag", no_ack=False)
- queue = self.client.queue("consumer_tag")
-
- #set prefetch to 50 bytes (each message is 9 or 10 bytes):
- channel.message_qos(prefetch_size=50)
-
- #publish 10 messages:
- for i in range(1, 11):
- channel.message_transfer(routing_key="test-prefetch-size", body="Message %d" % i)
-
- #only 5 messages should have been delivered (i.e. 45 bytes worth):
- for i in range(1, 6):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 6th message in original queue: " + extra.body)
- except Empty: None
-
-        #ack messages and check that the next set arrives ok:
- msg.ok(batchoffset=-4)#1-5
-
- for i in range(6, 11):
- msg = queue.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
-
- msg.ok(batchoffset=-4)#6-10
-
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected 11th message in original queue: " + extra.body)
- except Empty: None
-
- #make sure that a single oversized message still gets delivered
- large = "abcdefghijklmnopqrstuvwxyz"
- large = large + "-" + large;
- channel.message_transfer(routing_key="test-prefetch-size", body=large)
- msg = queue.get(timeout=1)
- self.assertEqual(large, msg.body)
-
- def test_get(self):
- """
- Test message_get method
- """
- channel = self.channel
- channel.queue_declare(queue="test-get", exclusive=True)
-
- #publish some messages (no_ack=True)
- for i in range(1, 11):
- channel.message_transfer(routing_key="test-get", body="Message %d" % i)
-
- #use message_get to read back the messages, and check that we get an empty at the end
- for i in range(1, 11):
- tag = "queue %d" % i
- reply = channel.message_get(no_ack=True, queue="test-get", destination=tag)
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "ok")
- self.assertEqual("Message %d" % i, self.client.queue(tag).get(timeout=1).body)
-
- reply = channel.message_get(no_ack=True, queue="test-get")
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "empty")
-
- #repeat for no_ack=False
- for i in range(11, 21):
- channel.message_transfer(routing_key="test-get", body="Message %d" % i)
-
- for i in range(11, 21):
- tag = "queue %d" % i
- reply = channel.message_get(no_ack=False, queue="test-get", destination=tag)
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "ok")
- msg = self.client.queue(tag).get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
-
- if (i==13):
- msg.ok(batchoffset=-2)#11, 12 & 13
- if(i in [15, 17, 19]):
- msg.ok()
-
- reply = channel.message_get(no_ack=True, queue="test-get")
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "empty")
-
- #recover(requeue=True)
- channel.message_recover(requeue=True)
-
- #get the unacked messages again (14, 16, 18, 20)
- for i in [14, 16, 18, 20]:
- tag = "queue %d" % i
- reply = channel.message_get(no_ack=False, queue="test-get", destination=tag)
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "ok")
- msg = self.client.queue(tag).get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
- msg.ok()
- #channel.message_ack(delivery_tag=reply.delivery_tag)
-
- reply = channel.message_get(no_ack=True, queue="test-get")
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "empty")
-
- channel.message_recover(requeue=True)
-
- reply = channel.message_get(no_ack=True, queue="test-get")
- self.assertEqual(reply.method.klass.name, "message")
- self.assertEqual(reply.method.name, "empty")
-
- def test_reference_simple(self):
- """
- Test basic ability to handle references
- """
- channel = self.channel
- channel.queue_declare(queue="ref_queue", exclusive=True)
- channel.message_consume(queue="ref_queue", destination="c1")
- queue = self.client.queue("c1")
-
- refId = "myref"
- channel.message_open(reference=refId)
- channel.message_append(reference=refId, bytes="abcd")
- channel.synchronous = False
- ack = channel.message_transfer(routing_key="ref_queue", body=ReferenceId(refId))
- channel.synchronous = True
-
- channel.message_append(reference=refId, bytes="efgh")
- channel.message_append(reference=refId, bytes="ijkl")
- channel.message_close(reference=refId)
-
- #first, wait for the ok for the transfer
- ack.get_response(timeout=1)
-
- self.assertDataEquals(channel, queue.get(timeout=1), "abcdefghijkl")
-
-
- def test_reference_large(self):
- """
- Test basic ability to handle references whose content exceeds max frame size
- """
- channel = self.channel
- self.queue_declare(queue="ref_queue")
-
- #generate a big data string (> max frame size of consumer):
- data = "0123456789"
- for i in range(0, 10):
- data += data
- #send it inline
- channel.synchronous = False
- ack = channel.message_transfer(routing_key="ref_queue", body=data)
- channel.synchronous = True
- #first, wait for the ok for the transfer
- ack.get_response(timeout=1)
-
- #create a new connection for consumer, with specific max frame size (< data)
- other = self.connect(tune_params={"channel_max":10, "frame_max":5120, "heartbeat":0})
- ch2 = other.channel(1)
- ch2.channel_open()
- ch2.message_consume(queue="ref_queue", destination="c1")
- queue = other.queue("c1")
-
- msg = queue.get(timeout=1)
- self.assertTrue(isinstance(msg.body, ReferenceId))
- self.assertTrue(msg.reference)
- self.assertEquals(data, msg.reference.get_complete())
-
- def test_reference_completion(self):
- """
-        Test that reference transfers are not deemed complete until
- closed (therefore are not acked or routed until that point)
- """
- channel = self.channel
- channel.queue_declare(queue="ref_queue", exclusive=True)
- channel.message_consume(queue="ref_queue", destination="c1")
- queue = self.client.queue("c1")
-
- refId = "myref"
- channel.message_open(reference=refId)
- channel.message_append(reference=refId, bytes="abcd")
- channel.synchronous = False
- ack = channel.message_transfer(routing_key="ref_queue", body=ReferenceId(refId))
- channel.synchronous = True
-
- try:
- msg = queue.get(timeout=1)
- self.fail("Got unexpected message on queue: " + msg)
- except Empty: None
-
- self.assertTrue(not ack.is_complete())
-
- channel.message_close(reference=refId)
-
- #first, wait for the ok for the transfer
- ack.get_response(timeout=1)
-
- self.assertDataEquals(channel, queue.get(timeout=1), "abcd")
-
- def test_reference_multi_transfer(self):
- """
- Test that multiple transfer requests for the same reference are
- correctly handled.
- """
- channel = self.channel
- #declare and consume from two queues
- channel.queue_declare(queue="q-one", exclusive=True)
- channel.queue_declare(queue="q-two", exclusive=True)
- channel.message_consume(queue="q-one", destination="q-one")
- channel.message_consume(queue="q-two", destination="q-two")
- queue1 = self.client.queue("q-one")
- queue2 = self.client.queue("q-two")
-
- #transfer a single ref to both queues (in separate commands)
- channel.message_open(reference="my-ref")
- channel.synchronous = False
- ack1 = channel.message_transfer(routing_key="q-one", body=ReferenceId("my-ref"))
- channel.message_append(reference="my-ref", bytes="my data")
- ack2 = channel.message_transfer(routing_key="q-two", body=ReferenceId("my-ref"))
- channel.synchronous = True
- channel.message_close(reference="my-ref")
-
- #check that both queues have the message
- self.assertDataEquals(channel, queue1.get(timeout=1), "my data")
- self.assertDataEquals(channel, queue2.get(timeout=1), "my data")
- self.assertEmpty(queue1)
- self.assertEmpty(queue2)
-
- #transfer a single ref to the same queue twice (in separate commands)
- channel.message_open(reference="my-ref")
- channel.synchronous = False
- ack1 = channel.message_transfer(routing_key="q-one", message_id="abc", body=ReferenceId("my-ref"))
- channel.message_append(reference="my-ref", bytes="second message")
- ack2 = channel.message_transfer(routing_key="q-one", message_id="xyz", body=ReferenceId("my-ref"))
- channel.synchronous = True
- channel.message_close(reference="my-ref")
-
- msg1 = queue1.get(timeout=1)
- msg2 = queue1.get(timeout=1)
- #order is undefined
- if msg1.message_id == "abc":
- self.assertEquals(msg2.message_id, "xyz")
- else:
- self.assertEquals(msg1.message_id, "xyz")
- self.assertEquals(msg2.message_id, "abc")
-
-        #would be legal for the incoming messages to be transferred
- #inline or by reference in any combination
-
- if isinstance(msg1.body, ReferenceId):
- self.assertEquals("second message", msg1.reference.get_complete())
- if isinstance(msg2.body, ReferenceId):
- if msg1.body != msg2.body:
- self.assertEquals("second message", msg2.reference.get_complete())
- #else ok, as same ref as msg1
- else:
- self.assertEquals("second message", msg1.body)
- if isinstance(msg2.body, ReferenceId):
- self.assertEquals("second message", msg2.reference.get_complete())
- else:
- self.assertEquals("second message", msg2.body)
-
- self.assertEmpty(queue1)
-
- def test_reference_unopened_on_append_error(self):
- channel = self.channel
- try:
- channel.message_append(reference="unopened")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def test_reference_unopened_on_close_error(self):
- channel = self.channel
- try:
- channel.message_close(reference="unopened")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def test_reference_unopened_on_transfer_error(self):
- channel = self.channel
- try:
- channel.message_transfer(body=ReferenceId("unopened"))
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def test_reference_already_opened_error(self):
- channel = self.channel
- channel.message_open(reference="a")
- try:
- channel.message_open(reference="a")
- except Closed, e:
- self.assertConnectionException(503, e.args[0])
-
- def test_empty_reference(self):
- channel = self.channel
- channel.queue_declare(queue="ref_queue", exclusive=True)
- channel.message_consume(queue="ref_queue", destination="c1")
- queue = self.client.queue("c1")
-
- refId = "myref"
- channel.message_open(reference=refId)
- channel.synchronous = False
- ack = channel.message_transfer(routing_key="ref_queue", message_id="empty-msg", body=ReferenceId(refId))
- channel.synchronous = True
- channel.message_close(reference=refId)
-
- #first, wait for the ok for the transfer
- ack.get_response(timeout=1)
-
- msg = queue.get(timeout=1)
- self.assertEquals(msg.message_id, "empty-msg")
- self.assertDataEquals(channel, msg, "")
-
- def test_reject(self):
- channel = self.channel
- channel.queue_declare(queue = "q", exclusive=True)
-
- channel.message_consume(queue = "q", destination = "consumer")
- channel.message_transfer(routing_key = "q", body="blah, blah")
- msg = self.client.queue("consumer").get(timeout = 1)
- self.assertEquals(msg.body, "blah, blah")
- channel.message_cancel(destination = "consumer")
- msg.reject()
-
- channel.message_consume(queue = "q", destination = "checker")
- msg = self.client.queue("checker").get(timeout = 1)
- self.assertEquals(msg.body, "blah, blah")
-
- def test_checkpoint(self):
- channel = self.channel
- channel.queue_declare(queue = "q", exclusive=True)
-
- channel.message_open(reference="my-ref")
- channel.message_append(reference="my-ref", bytes="abcdefgh")
- channel.message_append(reference="my-ref", bytes="ijklmnop")
- channel.message_checkpoint(reference="my-ref", identifier="my-checkpoint")
- channel.channel_close()
-
- channel = self.client.channel(2)
- channel.channel_open()
- channel.message_consume(queue = "q", destination = "consumer")
- offset = channel.message_resume(reference="my-ref", identifier="my-checkpoint").value
- self.assertTrue(offset<=16)
- channel.message_append(reference="my-ref", bytes="qrstuvwxyz")
- channel.synchronous = False
- channel.message_transfer(routing_key="q-one", message_id="abcd", body=ReferenceId("my-ref"))
- channel.synchronous = True
- channel.message_close(reference="my-ref")
-
- self.assertDataEquals(channel, self.client.queue("consumer").get(timeout = 1), "abcdefghijklmnopqrstuvwxyz")
- self.assertEmpty(self.client.queue("consumer"))
-
-
- def assertDataEquals(self, channel, msg, expected):
- if isinstance(msg.body, ReferenceId):
- data = msg.reference.get_complete()
- else:
- data = msg.body
- self.assertEquals(expected, data)
diff --git a/python/tests_0-9/query.py b/python/tests_0-9/query.py
index c2e08c003c..cb66d079e5 100644
--- a/python/tests_0-9/query.py
+++ b/python/tests_0-9/query.py
@@ -19,7 +19,7 @@
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class QueryTests(TestBase):
"""Tests for various query methods introduced in 0-10 and available in 0-9 for preview"""
diff --git a/python/tests_0-9/queue.py b/python/tests_0-9/queue.py
index e7fe0b3ed4..de1153307c 100644
--- a/python/tests_0-9/queue.py
+++ b/python/tests_0-9/queue.py
@@ -6,9 +6,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,137 +19,11 @@
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
+from qpid.testlib import TestBase
class QueueTests(TestBase):
"""Tests for 'methods' on the amqp queue 'class'"""
- def test_purge(self):
- """
- Test that the purge method removes messages from the queue
- """
- channel = self.channel
- #setup, declare a queue and add some messages to it:
- channel.exchange_declare(exchange="test-exchange", type="direct")
- channel.queue_declare(queue="test-queue", exclusive=True)
- channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
- channel.message_transfer(destination="test-exchange", routing_key="key", body="one")
- channel.message_transfer(destination="test-exchange", routing_key="key", body="two")
- channel.message_transfer(destination="test-exchange", routing_key="key", body="three")
-
- #check that the queue now reports 3 messages:
- reply = channel.queue_declare(queue="test-queue")
- self.assertEqual(3, reply.message_count)
-
- #now do the purge, then test that three messages are purged and the count drops to 0
- reply = channel.queue_purge(queue="test-queue");
- self.assertEqual(3, reply.message_count)
- reply = channel.queue_declare(queue="test-queue")
- self.assertEqual(0, reply.message_count)
-
- #send a further message and consume it, ensuring that the other messages are really gone
- channel.message_transfer(destination="test-exchange", routing_key="key", body="four")
- channel.message_consume(queue="test-queue", destination="tag", no_ack=True)
- queue = self.client.queue("tag")
- msg = queue.get(timeout=1)
- self.assertEqual("four", msg.body)
-
- #check error conditions (use new channels):
- channel = self.client.channel(2)
- channel.channel_open()
- try:
- #queue specified but doesn't exist:
- channel.queue_purge(queue="invalid-queue")
- self.fail("Expected failure when purging non-existent queue")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
- channel = self.client.channel(3)
- channel.channel_open()
- try:
- #queue not specified and none previously declared for channel:
- channel.queue_purge()
- self.fail("Expected failure when purging unspecified queue")
- except Closed, e:
- self.assertConnectionException(530, e.args[0])
-
- #cleanup
- other = self.connect()
- channel = other.channel(1)
- channel.channel_open()
- channel.exchange_delete(exchange="test-exchange")
-
- def test_declare_exclusive(self):
- """
- Test that the exclusive field is honoured in queue.declare
- """
- # TestBase.setUp has already opened channel(1)
- c1 = self.channel
- # Here we open a second separate connection:
- other = self.connect()
- c2 = other.channel(1)
- c2.channel_open()
-
- #declare an exclusive queue:
- c1.queue_declare(queue="exclusive-queue", exclusive="True")
- try:
- #other connection should not be allowed to declare this:
- c2.queue_declare(queue="exclusive-queue", exclusive="True")
- self.fail("Expected second exclusive queue_declare to raise a channel exception")
- except Closed, e:
- self.assertChannelException(405, e.args[0])
-
-
- def test_declare_passive(self):
- """
- Test that the passive field is honoured in queue.declare
- """
- channel = self.channel
- #declare an exclusive queue:
- channel.queue_declare(queue="passive-queue-1", exclusive="True")
- channel.queue_declare(queue="passive-queue-1", passive="True")
- try:
-            #passive declare of a queue that does not exist should fail:
-            channel.queue_declare(queue="passive-queue-2", passive="True")
-            self.fail("Expected passive declaration of non-existent queue to raise a channel exception")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
-
- def test_bind(self):
- """
- Test various permutations of the queue.bind method
- """
- channel = self.channel
- channel.queue_declare(queue="queue-1", exclusive="True")
-
- #straightforward case, both exchange & queue exist so no errors expected:
- channel.queue_bind(queue="queue-1", exchange="amq.direct", routing_key="key1")
-
- #bind the default queue for the channel (i.e. last one declared):
- channel.queue_bind(exchange="amq.direct", routing_key="key2")
-
-        #use the queue name where neither routing key nor queue is specified:
- channel.queue_bind(exchange="amq.direct")
-
-        #try and bind to non-existent exchange
- try:
- channel.queue_bind(queue="queue-1", exchange="an-invalid-exchange", routing_key="key1")
-            self.fail("Expected bind to non-existent exchange to fail")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
- #need to reopen a channel:
- channel = self.client.channel(2)
- channel.channel_open()
-
-        #try and bind non-existent queue:
- try:
- channel.queue_bind(queue="queue-2", exchange="amq.direct", routing_key="key1")
-            self.fail("Expected bind of non-existent queue to fail")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
def test_unbind_direct(self):
self.unbind_test(exchange="amq.direct", routing_key="key")
@@ -165,12 +39,12 @@ class QueueTests(TestBase):
def unbind_test(self, exchange, routing_key="", args=None, headers={}):
#bind two queues and consume from them
channel = self.channel
-
+
channel.queue_declare(queue="queue-1", exclusive="True")
channel.queue_declare(queue="queue-2", exclusive="True")
- channel.message_consume(queue="queue-1", destination="queue-1", no_ack=True)
- channel.message_consume(queue="queue-2", destination="queue-2", no_ack=True)
+ channel.basic_consume(queue="queue-1", consumer_tag="queue-1", no_ack=True)
+ channel.basic_consume(queue="queue-2", consumer_tag="queue-2", no_ack=True)
queue1 = self.client.queue("queue-1")
queue2 = self.client.queue("queue-2")
@@ -179,130 +53,29 @@ class QueueTests(TestBase):
channel.queue_bind(exchange=exchange, queue="queue-2", routing_key=routing_key, arguments=args)
#send a message that will match both bindings
- channel.message_transfer(destination=exchange, routing_key=routing_key, application_headers=headers, body="one")
-
+ channel.basic_publish(exchange=exchange, routing_key=routing_key,
+ content=Content("one", properties={"headers": headers}))
+
#unbind first queue
channel.queue_unbind(exchange=exchange, queue="queue-1", routing_key=routing_key, arguments=args)
-
+
#send another message
- channel.message_transfer(destination=exchange, routing_key=routing_key, application_headers=headers, body="two")
+ channel.basic_publish(exchange=exchange, routing_key=routing_key,
+ content=Content("two", properties={"headers": headers}))
#check one queue has both messages and the other has only one
- self.assertEquals("one", queue1.get(timeout=1).body)
+ self.assertEquals("one", queue1.get(timeout=1).content.body)
try:
msg = queue1.get(timeout=1)
self.fail("Got extra message: %s" % msg.body)
except Empty: pass
- self.assertEquals("one", queue2.get(timeout=1).body)
- self.assertEquals("two", queue2.get(timeout=1).body)
+ self.assertEquals("one", queue2.get(timeout=1).content.body)
+ self.assertEquals("two", queue2.get(timeout=1).content.body)
try:
msg = queue2.get(timeout=1)
self.fail("Got extra message: " + msg)
- except Empty: pass
-
-
- def test_delete_simple(self):
- """
- Test core queue deletion behaviour
- """
- channel = self.channel
-
- #straight-forward case:
- channel.queue_declare(queue="delete-me")
- channel.message_transfer(routing_key="delete-me", body="a")
- channel.message_transfer(routing_key="delete-me", body="b")
- channel.message_transfer(routing_key="delete-me", body="c")
- reply = channel.queue_delete(queue="delete-me")
- self.assertEqual(3, reply.message_count)
-        #check that it has gone by declaring passively
- try:
- channel.queue_declare(queue="delete-me", passive="True")
- self.fail("Queue has not been deleted")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
-        #check attempted deletion of non-existent queue is handled correctly:
- channel = self.client.channel(2)
- channel.channel_open()
- try:
- channel.queue_delete(queue="i-dont-exist", if_empty="True")
-            self.fail("Expected delete of non-existent queue to fail")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
-
-
- def test_delete_ifempty(self):
- """
- Test that if_empty field of queue_delete is honoured
- """
- channel = self.channel
-
- #create a queue and add a message to it (use default binding):
- channel.queue_declare(queue="delete-me-2")
- channel.queue_declare(queue="delete-me-2", passive="True")
- channel.message_transfer(routing_key="delete-me-2", body="message")
-
- #try to delete, but only if empty:
- try:
- channel.queue_delete(queue="delete-me-2", if_empty="True")
- self.fail("Expected delete if_empty to fail for non-empty queue")
- except Closed, e:
- self.assertChannelException(406, e.args[0])
-
- #need new channel now:
- channel = self.client.channel(2)
- channel.channel_open()
-
- #empty queue:
- channel.message_consume(destination="consumer_tag", queue="delete-me-2", no_ack=True)
- queue = self.client.queue("consumer_tag")
- msg = queue.get(timeout=1)
- self.assertEqual("message", msg.body)
- channel.message_cancel(destination="consumer_tag")
-
- #retry deletion on empty queue:
- channel.queue_delete(queue="delete-me-2", if_empty="True")
-
- #check that it has gone by declaring passively:
- try:
- channel.queue_declare(queue="delete-me-2", passive="True")
- self.fail("Queue has not been deleted")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
- def test_delete_ifunused(self):
- """
- Test that if_unused field of queue_delete is honoured
- """
- channel = self.channel
-
- #create a queue and register a consumer:
- channel.queue_declare(queue="delete-me-3")
- channel.queue_declare(queue="delete-me-3", passive="True")
- channel.message_consume(destination="consumer_tag", queue="delete-me-3", no_ack=True)
-
- #need new channel now:
- channel2 = self.client.channel(2)
- channel2.channel_open()
-        #try to delete, but only if unused:
- try:
- channel2.queue_delete(queue="delete-me-3", if_unused="True")
- self.fail("Expected delete if_unused to fail for queue with existing consumer")
- except Closed, e:
- self.assertChannelException(406, e.args[0])
-
-
- channel.message_cancel(destination="consumer_tag")
- channel.queue_delete(queue="delete-me-3", if_unused="True")
- #check that it has gone by declaring passively:
- try:
- channel.queue_declare(queue="delete-me-3", passive="True")
- self.fail("Queue has not been deleted")
- except Closed, e:
- self.assertChannelException(404, e.args[0])
-
+ except Empty: pass
def test_autodelete_shared(self):
"""
@@ -336,5 +109,3 @@ class QueueTests(TestBase):
self.fail("Expected queue to have been deleted")
except Closed, e:
self.assertChannelException(404, e.args[0])
-
-
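The hunks above port unbind_test from the preview message_* methods to the 0-8/0-9 basic class, subscribing with basic_consume and publishing Content bodies with basic_publish. As a minimal standalone sketch of that converted pattern (assuming a qpid.testlib TestBase-derived test with the usual self.channel and self.client fixtures, and hypothetical queue/tag names used only for illustration):

    from qpid.content import Content
    from qpid.testlib import TestBase

    class BasicRoundTripSketch(TestBase):
        # sketch only: TestBase is assumed to open self.client and self.channel in setUp
        def test_basic_roundtrip(self):
            channel = self.channel
            channel.queue_declare(queue="sketch-queue", exclusive=True)
            channel.queue_bind(queue="sketch-queue", exchange="amq.direct",
                               routing_key="sketch-key")

            # subscribe via the basic class rather than message_consume
            channel.basic_consume(queue="sketch-queue", consumer_tag="sketch-tag",
                                  no_ack=True)
            incoming = self.client.queue("sketch-tag")

            # publish a Content body rather than message_transfer(body=...)
            channel.basic_publish(exchange="amq.direct", routing_key="sketch-key",
                                  content=Content("hello", properties={"headers": {}}))

            # delivered messages carry their payload on content.body
            self.assertEqual("hello", incoming.get(timeout=1).content.body)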
diff --git a/python/tests_0-9/testlib.py b/python/tests_0-9/testlib.py
deleted file mode 100644
index f345fbbd80..0000000000
--- a/python/tests_0-9/testlib.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# Tests for the testlib itself.
-#
-
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-from Queue import Empty
-
-import sys
-from traceback import *
-
-def mytrace(frame, event, arg):
- print_stack(frame);
- print "===="
- return mytrace
-
-class TestBaseTest(TestBase):
- """Verify TestBase functions work as expected"""
-
- def testAssertEmptyPass(self):
- """Test assert empty works"""
- self.queue_declare(queue="empty")
- q = self.consume("empty")
- self.assertEmpty(q)
- try:
- q.get(timeout=1)
- self.fail("Queue is not empty.")
- except Empty: None # Ignore
-
- def testAssertEmptyFail(self):
- self.queue_declare(queue="full")
- q = self.consume("full")
- self.channel.message_transfer(routing_key="full", body="")
- try:
- self.assertEmpty(q);
- self.fail("assertEmpty did not assert on non-empty queue")
- except AssertionError: None # Ignore
-
- def testMessageProperties(self):
- """Verify properties are passed with message"""
- props={"x":1, "y":2}
- self.queue_declare(queue="q")
- q = self.consume("q")
- self.assertPublishGet(q, routing_key="q", properties=props)
-
-
-
diff --git a/python/tests_0-9/tx.py b/python/tests_0-9/tx.py
deleted file mode 100644
index 0f6b4f5cd1..0000000000
--- a/python/tests_0-9/tx.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from qpid.client import Client, Closed
-from qpid.queue import Empty
-from qpid.content import Content
-from qpid.testlib import testrunner, TestBase
-
-class TxTests(TestBase):
- """
- Tests for 'methods' on the amqp tx 'class'
- """
-
- def test_commit(self):
- """
-        Test that committed publishes are delivered and committed acks are not re-delivered
- """
- channel = self.channel
- queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-commit-a", "tx-commit-b", "tx-commit-c")
- channel.tx_commit()
-
- #check results
- for i in range(1, 5):
- msg = queue_c.get(timeout=1)
- self.assertEqual("TxMessage %d" % i, msg.body)
- msg.ok()
-
- msg = queue_b.get(timeout=1)
- self.assertEqual("TxMessage 6", msg.body)
- msg.ok()
-
- msg = queue_a.get(timeout=1)
- self.assertEqual("TxMessage 7", msg.body)
- msg.ok()
-
- for q in [queue_a, queue_b, queue_c]:
- try:
- extra = q.get(timeout=1)
- self.fail("Got unexpected message: " + extra.body)
- except Empty: None
-
- #cleanup
- channel.tx_commit()
-
- def test_auto_rollback(self):
- """
- Test that a channel closed with an open transaction is effectively rolled back
- """
- channel = self.channel
- queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
-
- for q in [queue_a, queue_b, queue_c]:
- try:
- extra = q.get(timeout=1)
- self.fail("Got unexpected message: " + extra.body)
- except Empty: None
-
- channel.tx_rollback()
-
- #check results
- for i in range(1, 5):
- msg = queue_a.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
- msg.ok()
-
- msg = queue_b.get(timeout=1)
- self.assertEqual("Message 6", msg.body)
- msg.ok()
-
- msg = queue_c.get(timeout=1)
- self.assertEqual("Message 7", msg.body)
- msg.ok()
-
- for q in [queue_a, queue_b, queue_c]:
- try:
- extra = q.get(timeout=1)
- self.fail("Got unexpected message: " + extra.body)
- except Empty: None
-
- #cleanup
- channel.tx_commit()
-
- def test_rollback(self):
- """
- Test that rolled back publishes are not delivered and rolled back acks are re-delivered
- """
- channel = self.channel
- queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
-
- for q in [queue_a, queue_b, queue_c]:
- try:
- extra = q.get(timeout=1)
- self.fail("Got unexpected message: " + extra.body)
- except Empty: None
-
- channel.tx_rollback()
-
- #check results
- for i in range(1, 5):
- msg = queue_a.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
- msg.ok()
-
- msg = queue_b.get(timeout=1)
- self.assertEqual("Message 6", msg.body)
- msg.ok()
-
- msg = queue_c.get(timeout=1)
- self.assertEqual("Message 7", msg.body)
- msg.ok()
-
- for q in [queue_a, queue_b, queue_c]:
- try:
- extra = q.get(timeout=1)
- self.fail("Got unexpected message: " + extra.body)
- except Empty: None
-
- #cleanup
- channel.tx_commit()
-
- def perform_txn_work(self, channel, name_a, name_b, name_c):
- """
- Utility method that does some setup and some work under a transaction. Used for testing both
- commit and rollback
- """
- #setup:
- channel.queue_declare(queue=name_a, exclusive=True)
- channel.queue_declare(queue=name_b, exclusive=True)
- channel.queue_declare(queue=name_c, exclusive=True)
-
- key = "my_key_" + name_b
- topic = "my_topic_" + name_c
-
- channel.queue_bind(queue=name_b, exchange="amq.direct", routing_key=key)
- channel.queue_bind(queue=name_c, exchange="amq.topic", routing_key=topic)
-
- for i in range(1, 5):
- channel.message_transfer(routing_key=name_a, body="Message %d" % i)
-
- channel.message_transfer(routing_key=key, destination="amq.direct", body="Message 6")
- channel.message_transfer(routing_key=topic, destination="amq.topic", body="Message 7")
-
- channel.tx_select()
-
- #consume and ack messages
- channel.message_consume(queue=name_a, destination="sub_a", no_ack=False)
- queue_a = self.client.queue("sub_a")
- for i in range(1, 5):
- msg = queue_a.get(timeout=1)
- self.assertEqual("Message %d" % i, msg.body)
-
- msg.ok(batchoffset=-3)
-
- channel.message_consume(queue=name_b, destination="sub_b", no_ack=False)
- queue_b = self.client.queue("sub_b")
- msg = queue_b.get(timeout=1)
- self.assertEqual("Message 6", msg.body)
- msg.ok()
-
- sub_c = channel.message_consume(queue=name_c, destination="sub_c", no_ack=False)
- queue_c = self.client.queue("sub_c")
- msg = queue_c.get(timeout=1)
- self.assertEqual("Message 7", msg.body)
- msg.ok()
-
- #publish messages
- for i in range(1, 5):
- channel.message_transfer(routing_key=topic, destination="amq.topic", body="TxMessage %d" % i)
-
- channel.message_transfer(routing_key=key, destination="amq.direct", body="TxMessage 6")
- channel.message_transfer(routing_key=name_a, body="TxMessage 7")
-
- return queue_a, queue_b, queue_c
diff --git a/python/todo.txt b/python/todo.txt
new file mode 100644
index 0000000000..7b3ca90638
--- /dev/null
+++ b/python/todo.txt
@@ -0,0 +1,188 @@
+Key:
+ F = Functional
+ PF = Partially Functional
+ NR = Needs Additional Review
+ ND = Needs Additional Design
+ NF = Not Functional
+
+Connection:
+
+ variables/configuration:
+
+ - reconnect: F, NR, ND
+ + reconnect functionality is done and the API semantics provided
+ are ready for review
+      + reconnect policies need to be finished; there is currently
+ only one hardcoded reconnect policy (retry every three
+        seconds); we need to define the pre-canned policies that we
+ want to support and a means to configure them, as well as a
+ very simple plugin/callback for defining ad-hoc policies
+ + need to feed failover exchange into the reconnect policy
+ + acks can be lost on reconnect
+ + handle reconnect during commit/rollback
+
+ - timeout: NF
+ + some sort of timeout threshold for killing the connection
+
+ methods:
+
+ - open/__init__: F, ND
+ + need to support kerberos
+ + need a better way of supplying various kinds of configuration:
+ - authentication info
+ - transport specific configuration options, e.g
+ - heartbeat
+ - socket options
+ - tcp-nodelay
+ - multiple brokers
+
+ - session: F, NR
+
+ - connect: F, NR
+
+ - disconnect: F, NR
+
+ - connected: F, NR
+
+ - close: F, NR, ND
+ + currently there is no distinction between a "close" that does
+ a complete handshake with the remote broker, and a "close"
+        that reclaims resources; this means that close can fail with
+        an exception. I don't like this, as it is unclear to the user
+        whether there is a responsibility to do further cleanup in this
+ case
+
+ errors:
+
+ - ConnectionError: F, NR
+ + ConnectError F, NR
+ + Disconnected F, NR
+
+ - notification of disconnect?
+
+Session:
+
+ methods:
+
+ - sender: F, NR, ND
+ + need to detail address options
+ + need to define subject pattern semantics
+ + consider providing convenience for sender/receiver caching
+
+ - receiver: F, NR, ND
+ + need to detail address options
+ + need to define filter syntax/semantics
+ + consider providing convenience for sender/receiver caching
+
+ - acknowledge: F, NR
+
+ - reject: NF
+
+ - release: NF
+
+ - commit: F, NR
+
+ - rollback: F, NR
+
+ - next_receiver: F, NR
+
+ - close: F, ND
+ + see comment on Connection.close
+
+ errors:
+
+ - SessionError: F, NR, ND
+ + SendError: F, NR, ND
+ + ReceiveError: F, NR, ND
+      + should there be fatal/non-fatal variants?
+
+Sender:
+
+ methods:
+
+ - pending: F, NR
+
+ - send: F, NR
+
+ - sync: F, NR, ND
+      + currently this blocks until pending == 0; I'm thinking of
+ renaming this to wait and adding a slightly richer interface
+ that would let you wait for something like pending < n
+
+ - close: F, NR
+
+ errors:
+
+ - SendError
+ + InsufficientCapacity
+ + need specific subhierarchy for certain conditions, e.g. no such queue
+
+Receiver:
+
+ methods:
+
+ - pending: F, NR
+
+ - listen: F, ND
+      + see comment on Receiver.fetch
+
+ - fetch: F, NR, ND
+ + explicit grant for receiver
+ + changing capacity/prefetch to issue credit on ack rather than
+ fetch return
+
+ - sync/wait: NF
+
+ - close: F, NR
+
+ errors:
+
+ - ReceiveError
+ + Empty
+ + need specific subhierarchy for certain conditions, e.g. no such queue
+
+Message:
+
+ - standard message properties: F, NR, ND
+
+ - map messages: F, NR
+ + needs interop testing: NF
+ + needs java impl: NF
+
+ - list messages: F, NR, NI
+ + needs interop testing: NF
+ + needs java impl: NF
+
+ - boxed types: NF
+
+Address:
+
+ - syntax: F, NR
+ - subject related changes, e.g. allowing patterns on both ends: NF
+ - creating/deleting queues/exchanges F, NR
+ + need to handle cleanup of temp queues/topics: F, NR
+ + passthrough options for creating exchanges/queues: F, NR
+ - integration with java: NF
+ - queue browsing: NF
+ - temporary queues: NF
+ - xquery: NF
+
+Testing:
+ - stress/soak testing for async: NF
+ - stress/soak testing for reconnect: NF
+ - interop testing: NF
+ - multi session and multi connection client tests: NF
+
+Documentation:
+ - api level docs largely present but need updating and elaboration
+ - tutorial: NF
+
+Examples:
+ - drain: F, NR
+ - spout: F, NR
+ - server: F, NR
+ - client: NF
+ - other examples, e.g. async?
+
+Miscellaneous:
+ - standard ping-like (drain/spout) utilities for all clients: NF
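Taken together, the Connection, Session, Sender and Receiver entries above outline the new qpid.messaging client API. As a rough, hypothetical sketch of how those methods are intended to fit together (the constructor arguments, address strings and queue name below are assumptions for illustration only, not confirmed by this todo; the drain and spout examples mentioned above exercise a similar flow):

    from qpid.messaging import Connection, Message

    conn = Connection("localhost", 5672)         # open/__init__ (arguments assumed)
    conn.connect()                               # Connection.connect
    try:
        ssn = conn.session()                     # Connection.session
        snd = ssn.sender("test-queue")           # Session.sender
        rcv = ssn.receiver("test-queue")         # Session.receiver

        snd.send(Message("hello world"))         # Sender.send
        snd.send(Message({"pi": 3.14, "n": 1}))  # map message ("map messages: F, NR")

        for i in range(2):
            msg = rcv.fetch(timeout=10)          # Receiver.fetch
            print msg.content
            ssn.acknowledge()                    # Session.acknowledge

        ssn.close()                              # Session.close
    finally:
        conn.close()                             # Connection.close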