qpid-python-0.22/0000755000175000017500000000000012243755326012002 5ustar mbambaqpid-python-0.22/.pc/0000755000175000017500000000000012243755010012450 5ustar mbambaqpid-python-0.22/.pc/applied-patches0000644000175000017500000000000012243755010015424 0ustar mbambaqpid-python-0.22/.pc/.version0000644000175000017500000000000212243755010014126 0ustar mbamba2 qpid-python-0.22/.pc/.quilt_patches0000644000175000017500000000001712243755010015314 0ustar mbambadebian/patches qpid-python-0.22/.pc/.quilt_series0000644000175000017500000000000712243755010015156 0ustar mbambaseries qpid-python-0.22/python/0000755000175000017500000000000012151237730013313 5ustar mbambaqpid-python-0.22/python/qpid-python-test0000755000175000017500000004021111656710346016500 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # TODO: summarize, test harness preconditions (e.g. broker is alive) import logging, optparse, os, struct, sys, time, traceback, types from fnmatch import fnmatchcase as match from getopt import GetoptError from logging import getLogger, StreamHandler, Formatter, Filter, \ WARN, DEBUG, ERROR from qpid.harness import Skipped from qpid.util import URL levels = { "DEBUG": DEBUG, "WARN": WARN, "ERROR": ERROR } sorted_levels = [(v, k) for k, v in levels.items()] sorted_levels.sort() sorted_levels = [v for k, v in sorted_levels] parser = optparse.OptionParser(usage="usage: %prog [options] PATTERN ...", description="Run tests matching the specified PATTERNs.") parser.add_option("-l", "--list", action="store_true", default=False, help="list tests instead of executing them") parser.add_option("-b", "--broker", default="localhost", help="run tests against BROKER (default %default)") parser.add_option("-f", "--log-file", metavar="FILE", help="log output to FILE") parser.add_option("-v", "--log-level", metavar="LEVEL", default="WARN", help="only display log messages of LEVEL or higher severity: " "%s (default %%default)" % ", ".join(sorted_levels)) parser.add_option("-c", "--log-category", metavar="CATEGORY", action="append", dest="log_categories", default=[], help="log only categories matching CATEGORY pattern") parser.add_option("-m", "--module", action="append", default=[], dest="modules", help="add module to test search path") parser.add_option("-i", "--ignore", action="append", default=[], help="ignore tests matching IGNORE pattern") parser.add_option("-I", "--ignore-file", metavar="IFILE", action="append", default=[], help="ignore tests matching patterns in IFILE") parser.add_option("-H", "--halt-on-error", action="store_true", default=False, dest="hoe", help="halt if an error is encountered") parser.add_option("-t", "--time", action="store_true", default=False, help="report timing information on test run") parser.add_option("-D", "--define", 
metavar="DEFINE", dest="defines", action="append", default=[], help="define test parameters") parser.add_option("-x", "--xml", metavar="XML", dest="xml", help="write test results in Junit style xml suitable for use by CI tools etc") class Config: def __init__(self): self.broker = URL("localhost") self.defines = {} self.log_file = None self.log_level = WARN self.log_categories = [] opts, args = parser.parse_args() includes = [] excludes = ["*__*__"] config = Config() list_only = opts.list config.broker = URL(opts.broker) for d in opts.defines: try: idx = d.index("=") name = d[:idx] value = d[idx+1:] config.defines[name] = value except ValueError: config.defines[d] = None config.log_file = opts.log_file config.log_level = levels[opts.log_level.upper()] config.log_categories = opts.log_categories excludes.extend([v.strip() for v in opts.ignore]) for v in opts.ignore_file: f = open(v) for line in f: line = line.strip() if line.startswith("#"): continue excludes.append(line) f.close() for a in args: includes.append(a.strip()) if not includes: if opts.modules: includes.append("*") else: includes.extend(["qpid.tests.*"]) def is_ignored(path): for p in excludes: if match(path, p): return True return False def is_included(path): if is_ignored(path): return False for p in includes: if match(path, p): return True return False def is_smart(): return sys.stdout.isatty() and os.environ.get("TERM", "dumb") != "dumb" try: import fcntl, termios def width(): if is_smart(): s = struct.pack("HHHH", 0, 0, 0, 0) fd_stdout = sys.stdout.fileno() x = fcntl.ioctl(fd_stdout, termios.TIOCGWINSZ, s) rows, cols, xpx, ypx = struct.unpack("HHHH", x) return cols else: try: return int(os.environ.get("COLUMNS", "80")) except ValueError: return 80 WIDTH = width() def resize(sig, frm): global WIDTH WIDTH = width() import signal signal.signal(signal.SIGWINCH, resize) except ImportError: WIDTH = 80 def vt100_attrs(*attrs): return "\x1B[%sm" % ";".join(map(str, attrs)) vt100_reset = vt100_attrs(0) KEYWORDS = {"pass": (32,), "skip": (33,), "fail": (31,), "start": (34,), "total": (34,), "ignored": (33,), "selected": (34,), "elapsed": (34,), "average": (34,)} COLORIZE = is_smart() def colorize_word(word, text=None): if text is None: text = word return colorize(text, *KEYWORDS.get(word, ())) def colorize(text, *attrs): if attrs and COLORIZE: return "%s%s%s" % (vt100_attrs(*attrs), text, vt100_reset) else: return text def indent(text): lines = text.split("\n") return " %s" % "\n ".join(lines) # Write a 'minimal' Junit xml style report file suitable for use by CI tools such as Jenkins. 
class JunitXmlStyleReporter: def __init__(self, file): self.f = open(file, "w"); def begin(self): self.f.write('\n') self.f.write('\n') def report(self, name, result): parts = name.split(".") method = parts[-1] module = '.'.join(parts[0:-1]) self.f.write('\n' % (module, method, result.time)) if result.failed: self.f.write('\n') self.f.write('\n') self.f.write('\n') self.f.write('\n') def end(self): self.f.write('\n') self.f.close() class Interceptor: def __init__(self): self.newline = False self.indent = False self.passthrough = True self.dirty = False self.last = None def begin(self): self.newline = True self.indent = True self.passthrough = False self.dirty = False self.last = None def reset(self): self.newline = False self.indent = False self.passthrough = True class StreamWrapper: def __init__(self, interceptor, stream, prefix=" "): self.interceptor = interceptor self.stream = stream self.prefix = prefix def fileno(self): return self.stream.fileno() def isatty(self): return self.stream.isatty() def write(self, s): if self.interceptor.passthrough: self.stream.write(s) return if s: self.interceptor.dirty = True if self.interceptor.newline: self.interceptor.newline = False self.stream.write(" %s\n" % colorize_word("start")) self.interceptor.indent = True if self.interceptor.indent: self.stream.write(self.prefix) if s.endswith("\n"): s = s.replace("\n", "\n%s" % self.prefix)[:-2] self.interceptor.indent = True else: s = s.replace("\n", "\n%s" % self.prefix) self.interceptor.indent = False self.stream.write(s) if s: self.interceptor.last = s[-1] def flush(self): self.stream.flush() interceptor = Interceptor() out_wrp = StreamWrapper(interceptor, sys.stdout) err_wrp = StreamWrapper(interceptor, sys.stderr) out = sys.stdout err = sys.stderr sys.stdout = out_wrp sys.stderr = err_wrp class PatternFilter(Filter): def __init__(self, *patterns): Filter.__init__(self, patterns) self.patterns = patterns def filter(self, record): if not self.patterns: return True for p in self.patterns: if match(record.name, p): return True return False root = getLogger() handler = StreamHandler(sys.stdout) filter = PatternFilter(*config.log_categories) handler.addFilter(filter) handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s")) root.addHandler(handler) root.setLevel(WARN) log = getLogger("qpid.test") PASS = "pass" SKIP = "skip" FAIL = "fail" class Runner: def __init__(self): self.exceptions = [] self.skip = False def passed(self): return not self.exceptions def skipped(self): return self.skip def failed(self): return self.exceptions and not self.skip def halt(self): return self.exceptions or self.skip def run(self, name, phase): try: phase() except KeyboardInterrupt: raise except: exi = sys.exc_info() if issubclass(exi[0], Skipped): self.skip = True self.exceptions.append((name, exi)) def status(self): if self.passed(): return PASS elif self.skipped(): return SKIP elif self.failed(): return FAIL else: return None def get_formatted_exceptions(self): for name, info in self.exceptions: if issubclass(info[0], Skipped): output = indent("".join(traceback.format_exception_only(*info[:2]))).rstrip() else: output = "Error during %s:" % name output += indent("".join(traceback.format_exception(*info))).rstrip() return output ST_WIDTH = 8 def run_test(name, test, config): patterns = filter.patterns level = root.level filter.patterns = config.log_categories root.setLevel(config.log_level) parts = name.split(".") line = None output = "" for part in parts: if line: if len(line) + len(part) >= (WIDTH - 
ST_WIDTH - 1): output += "%s. \\\n" % line line = " %s" % part else: line = "%s.%s" % (line, part) else: line = part if line: output += "%s %s" % (line, (((WIDTH - ST_WIDTH) - len(line))*".")) sys.stdout.write(output) sys.stdout.flush() interceptor.begin() start = time.time() try: runner = test() finally: interceptor.reset() end = time.time() if interceptor.dirty: if interceptor.last != "\n": sys.stdout.write("\n") sys.stdout.write(output) print " %s" % colorize_word(runner.status()) if runner.failed() or runner.skipped(): print runner.get_formatted_exceptions() root.setLevel(level) filter.patterns = patterns return TestResult(end - start, runner.passed(), runner.skipped(), runner.failed(), runner.get_formatted_exceptions()) class TestResult: def __init__(self, time, passed, skipped, failed, exceptions): self.time = time self.passed = passed self.skipped = skipped self.failed = failed self.exceptions = exceptions class FunctionTest: def __init__(self, test): self.test = test def name(self): return "%s.%s" % (self.test.__module__, self.test.__name__) def run(self): return run_test(self.name(), self._run, config) def _run(self): runner = Runner() runner.run("test", lambda: self.test(config)) return runner def __repr__(self): return "FunctionTest(%r)" % self.test class MethodTest: def __init__(self, cls, method): self.cls = cls self.method = method def name(self): return "%s.%s.%s" % (self.cls.__module__, self.cls.__name__, self.method) def run(self): return run_test(self.name(), self._run, config) def _run(self): runner = Runner() inst = self.cls(self.method) test = getattr(inst, self.method) if hasattr(inst, "configure"): runner.run("configure", lambda: inst.configure(config)) if runner.halt(): return runner if hasattr(inst, "setUp"): runner.run("setup", inst.setUp) if runner.halt(): return runner elif hasattr(inst, "setup"): runner.run("setup", inst.setup) if runner.halt(): return runner runner.run("test", test) if hasattr(inst, "tearDown"): runner.run("teardown", inst.tearDown) elif hasattr(inst, "teardown"): runner.run("teardown", inst.teardown) return runner def __repr__(self): return "MethodTest(%r, %r)" % (self.cls, self.method) class PatternMatcher: def __init__(self, *patterns): self.patterns = patterns def matches(self, name): for p in self.patterns: if match(name, p): return True return False class FunctionScanner(PatternMatcher): def inspect(self, obj): return type(obj) == types.FunctionType and self.matches(name) def descend(self, func): # the None is required for older versions of python return; yield None def extract(self, func): yield FunctionTest(func) class ClassScanner(PatternMatcher): def inspect(self, obj): return type(obj) in (types.ClassType, types.TypeType) and self.matches(obj.__name__) def descend(self, cls): # the None is required for older versions of python return; yield None def extract(self, cls): names = dir(cls) names.sort() for name in names: obj = getattr(cls, name) t = type(obj) if t == types.MethodType and name.startswith("test"): yield MethodTest(cls, name) class ModuleScanner: def inspect(self, obj): return type(obj) == types.ModuleType def descend(self, obj): names = dir(obj) names.sort() for name in names: yield getattr(obj, name) def extract(self, obj): # the None is required for older versions of python return; yield None class Harness: def __init__(self): self.scanners = [ ModuleScanner(), ClassScanner("*Test", "*Tests", "*TestCase"), FunctionScanner("test_*") ] self.tests = [] self.scanned = [] def scan(self, *roots): objects = list(roots) while 
objects: obj = objects.pop(0) for s in self.scanners: if s.inspect(obj): self.tests.extend(s.extract(obj)) for child in s.descend(obj): if not (child in self.scanned or child in objects): objects.append(child) self.scanned.append(obj) modules = opts.modules if not modules: modules.extend(["qpid.tests"]) h = Harness() for name in modules: m = __import__(name, None, None, ["dummy"]) h.scan(m) filtered = [t for t in h.tests if is_included(t.name())] ignored = [t for t in h.tests if is_ignored(t.name())] total = len(filtered) + len(ignored) if opts.xml and not list_only: xmlr = JunitXmlStyleReporter(opts.xml); xmlr.begin(); else: xmlr = None passed = 0 failed = 0 skipped = 0 start = time.time() for t in filtered: if list_only: print t.name() else: st = t.run() if xmlr: xmlr.report(t.name(), st) if st.passed: passed += 1 elif st.skipped: skipped += 1 elif st.failed: failed += 1 if opts.hoe: break end = time.time() run = passed + failed if not list_only: if passed: _pass = "pass" else: _pass = "fail" if failed: outcome = "fail" else: outcome = "pass" if ignored: ign = "ignored" else: ign = "pass" if skipped: skip = "skip" else: skip = "pass" print colorize("Totals:", 1), totals = [colorize_word("total", "%s tests" % total), colorize_word(_pass, "%s passed" % passed), colorize_word(skip, "%s skipped" % skipped), colorize_word(ign, "%s ignored" % len(ignored)), colorize_word(outcome, "%s failed" % failed)] print ", ".join(totals), if opts.hoe and failed > 0: print " -- (halted after %s)" % run else: print if opts.time and run > 0: print colorize("Timing:", 1), timing = [colorize_word("elapsed", "%.2fs elapsed" % (end - start)), colorize_word("average", "%.2fs average" % ((end - start)/run))] print ", ".join(timing) if xmlr: xmlr.end() if failed or skipped: sys.exit(1) else: sys.exit(0) qpid-python-0.22/python/NOTICE.txt0000644000175000017500000000126710533361256015046 0ustar mbamba========================================================================= == NOTICE file corresponding to the section 4 d of == == the Apache License, Version 2.0, == == in this case for the Apache Qpid distribution. == ========================================================================= This product includes software developed by the Apache Software Foundation (http://www.apache.org/). Please read the LICENSE.txt file present in the root directory of this distribution. Aside from contributions to the Apache Qpid project, this software also includes (binary only): - None at this time qpid-python-0.22/python/README.txt0000644000175000017500000000345711472526140015023 0ustar mbambaThis distribution contains the Python client libraries for Apache Qpid. Apache Qpid is a high-speed, language independent, platform independent enterprise messaging system. It currently provides two messaging brokers (one implemented in C++, one implemented in Java), and messaging client libraries for Java JMS, C++, C# .NET, Python, Ruby, and WCF. The messaging protocol for Apache Qpid is AMQP (Advanced Message Queuing Protocol). You can read more about Qpid here: http://qpid.apache.org/ Documentation can be found here: http://qpid.apache.org/documentation.html = GETTING STARTED = 1. Make sure the Qpid Python client libraries are on your PYTHONPATH. If you have extracted the archive to the directory INSTALLPATH, the following export will work: $ export PYTHONPATH=${PYTHONPATH}:${INSTALLPATH}/qpid-0.8/python 2. Make sure a broker is running 3. Run the 'hello' example from qpid-0.8/python/examples/api: $ ./hello Hello world! 
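For orientation, a sketch of an equivalent "hello world" written directly
against the qpid.messaging API is shown below (the broker address and the
use of the amq.topic node are illustrative choices, not requirements):

  from qpid.messaging import Connection, Message

  conn = Connection.establish("localhost:5672")
  try:
      ssn = conn.session()
      # create the receiver before sending so the message is not dropped
      rcv = ssn.receiver("amq.topic")
      snd = ssn.sender("amq.topic")
      snd.send(Message("Hello world!"))
      print rcv.fetch(timeout=10).content
      ssn.acknowledge()
  finally:
      conn.close()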
= EXAMPLES = The examples/api directory contains several examples. Read examples/README.txt for further details on these examples. = RUNNING THE TESTS = The "tests" directory contains a collection of unit tests for the python client. The "tests_0-10", "tests_0-9", and "tests_0-8" directories contain protocol level conformance tests for AMQP brokers of the specified version. The qpid-python-test script may be used to run these tests. It will by default run the python unit tests and the 0-10 conformance tests: 1. Run a broker on the default port 2. ./qpid-python-test If you wish to run the 0-8 or 0-9 conformence tests, they may be selected as follows: 1. Run a broker on the default port 2. ./qpid-python-test tests_0-8.* -- or -- ./qpid-python-test tests_0-9.* See the qpid-python-test usage for for additional options: ./qpid-python-test -h qpid-python-0.22/python/doc/0000755000175000017500000000000012151237730014060 5ustar mbambaqpid-python-0.22/python/doc/test-requirements.txt0000644000175000017500000000240611121517011020310 0ustar mbamba############################################################################### # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. ############################################################################### * start and stop server, possibly in different configurations, should at least be able to specify host and port * initiate multiple connections/server * initiate multiple channels/connection * enable positive and negative tests for any protocol interaction * test harness must be as robust as possible to spec changes qpid-python-0.22/python/qpid/0000755000175000017500000000000012151237730014250 5ustar mbambaqpid-python-0.22/python/qpid/reference.py0000644000175000017500000000561310577571730016600 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# """ Support for amqp 'reference' content (as opposed to inline content) """ import threading from queue import Queue, Closed class NotOpened(Exception): pass class AlreadyOpened(Exception): pass """ A representation of a reference id; can be passed wherever amqp content is required in place of inline data """ class ReferenceId: def __init__(self, id): self.id = id """ Holds content received through 'reference api'. Instances of this class will be placed in the consumers queue on receiving a transfer (assuming the reference has been opened). Data can be retrieved in chunks (as append calls are received) or in full (after reference has been closed signalling data s complete). """ class Reference: def __init__(self, id): self.id = id self.chunks = Queue(0) def close(self): self.chunks.close() def append(self, bytes): self.chunks.put(bytes) def get_chunk(self): return self.chunks.get() def get_complete(self): data = "" for chunk in self: data += chunk return data def next(self): try: return self.get_chunk() except Closed, e: raise StopIteration def __iter__(self): return self """ Manages a set of opened references. New references can be opened and existing references can be retrieved or closed. """ class References: def __init__(self): self.map = {} self.lock = threading.Lock() def get(self, id): self.lock.acquire() try: try: ref = self.map[id] except KeyError: raise NotOpened() finally: self.lock.release() return ref def open(self, id): self.lock.acquire() try: if id in self.map: raise AlreadyOpened() self.map[id] = Reference(id) finally: self.lock.release() def close(self, id): self.get(id).close() self.lock.acquire() try: self.map.pop(id) finally: self.lock.release() qpid-python-0.22/python/qpid/client.py0000644000175000017500000001343112046451714016105 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ An AMQP client implementation that uses a custom delegate for interacting with the server. 
""" import os, threading from peer import Peer, Channel, Closed from delegate import Delegate from util import get_client_properties_with_defaults from connection08 import Connection, Frame, connect from spec08 import load from queue import Queue from reference import ReferenceId, References class Client: def __init__(self, host, port, spec = None, vhost = None): self.host = host self.port = port if spec: self.spec = spec else: from specs_config import amqp_spec_0_9 self.spec = load(amqp_spec_0_9) self.structs = StructFactory(self.spec) self.sessions = {} self.mechanism = None self.response = None self.locale = None self.vhost = vhost if self.vhost == None: self.vhost = "/" self.queues = {} self.lock = threading.Lock() self.closed = False self.reason = None self.started = threading.Event() def wait(self): self.started.wait() if self.closed: raise Closed(self.reason) def queue(self, key): self.lock.acquire() try: try: q = self.queues[key] except KeyError: q = Queue(0) self.queues[key] = q finally: self.lock.release() return q def start(self, response, mechanism="AMQPLAIN", locale="en_US", tune_params=None, client_properties=None): self.mechanism = mechanism self.response = response self.locale = locale self.tune_params = tune_params self.client_properties=get_client_properties_with_defaults(provided_client_properties=client_properties) self.socket = connect(self.host, self.port) self.conn = Connection(self.socket, self.spec) self.peer = Peer(self.conn, ClientDelegate(self), Session) self.conn.init() self.peer.start() self.wait() self.channel(0).connection_open(self.vhost) def channel(self, id): self.lock.acquire() try: ssn = self.peer.channel(id) ssn.client = self self.sessions[id] = ssn finally: self.lock.release() return ssn def session(self): self.lock.acquire() try: id = None for i in xrange(1, 64*1024): if not self.sessions.has_key(i): id = i break finally: self.lock.release() if id == None: raise RuntimeError("out of channels") else: return self.channel(id) def close(self): self.socket.close() class ClientDelegate(Delegate): def __init__(self, client): Delegate.__init__(self) self.client = client def connection_start(self, ch, msg): msg.start_ok(mechanism=self.client.mechanism, response=self.client.response, locale=self.client.locale, client_properties=self.client.client_properties) def connection_tune(self, ch, msg): if self.client.tune_params: #todo: just override the params, i.e. 
don't require them # all to be included in tune_params msg.tune_ok(**self.client.tune_params) else: msg.tune_ok(*msg.frame.args) self.client.started.set() def message_transfer(self, ch, msg): self.client.queue(msg.destination).put(msg) def message_open(self, ch, msg): ch.references.open(msg.reference) def message_close(self, ch, msg): ch.references.close(msg.reference) def message_append(self, ch, msg): ch.references.get(msg.reference).append(msg.bytes) def message_acquired(self, ch, msg): ch.control_queue.put(msg) def basic_deliver(self, ch, msg): self.client.queue(msg.consumer_tag).put(msg) def channel_pong(self, ch, msg): msg.ok() def channel_close(self, ch, msg): ch.closed(msg) def session_ack(self, ch, msg): pass def session_closed(self, ch, msg): ch.closed(msg) def connection_close(self, ch, msg): self.client.peer.closed(msg) def execution_complete(self, ch, msg): ch.completion.complete(msg.cumulative_execution_mark) def execution_result(self, ch, msg): future = ch.futures[msg.command_id] future.put_response(ch, msg.data) def closed(self, reason): self.client.closed = True self.client.reason = reason self.client.started.set() class StructFactory: def __init__(self, spec): self.spec = spec self.factories = {} def __getattr__(self, name): if self.factories.has_key(name): return self.factories[name] elif self.spec.domains.byname.has_key(name): f = lambda *args, **kwargs: self.struct(name, *args, **kwargs) self.factories[name] = f return f else: raise AttributeError(name) def struct(self, name, *args, **kwargs): return self.spec.struct(name, *args, **kwargs) class Session(Channel): def __init__(self, *args): Channel.__init__(self, *args) self.references = References() self.client = None def open(self): self.session_open() def close(self): self.session_close() self.client.lock.acquire() try: del self.client.sessions[self.id] finally: self.client.lock.release() qpid-python-0.22/python/qpid/log.py0000644000175000017500000000214710765513475015423 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from logging import getLogger, StreamHandler, Formatter from logging import DEBUG, INFO, WARN, ERROR, CRITICAL def enable(name=None, level=WARN, file=None): log = getLogger(name) handler = StreamHandler(file) handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s")) log.addHandler(handler) log.setLevel(level) qpid-python-0.22/python/qpid/messaging/0000755000175000017500000000000012151237730016225 5ustar mbambaqpid-python-0.22/python/qpid/messaging/transports.py0000644000175000017500000001443012123150406021011 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. 
The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import socket from qpid.util import connect TRANSPORTS = {} class SocketTransport: def __init__(self, conn, host, port): self.socket = connect(host, port) if conn.tcp_nodelay: self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) def fileno(self): return self.socket.fileno() class tcp(SocketTransport): def reading(self, reading): return reading def writing(self, writing): return writing def send(self, bytes): return self.socket.send(bytes) def recv(self, n): return self.socket.recv(n) def close(self): self.socket.close() TRANSPORTS["tcp"] = tcp try: from ssl import wrap_socket, SSLError, SSL_ERROR_WANT_READ, \ SSL_ERROR_WANT_WRITE, CERT_REQUIRED, CERT_NONE except ImportError: ## try the older python SSL api: from socket import ssl class old_ssl(SocketTransport): def __init__(self, conn, host, port): SocketTransport.__init__(self, conn, host, port) # Bug (QPID-4337): this is the "old" version of python SSL. # The private key is required. If a certificate is given, but no # keyfile, assume the key is contained in the certificate ssl_keyfile = conn.ssl_keyfile ssl_certfile = conn.ssl_certfile if ssl_certfile and not ssl_keyfile: ssl_keyfile = ssl_certfile # this version of SSL does NOT perform certificate validation. If the # connection has been configured with CA certs (via ssl_trustfile), then # the application expects the certificate to be validated against the # supplied CA certs. Since this version cannot validate, the peer cannot # be trusted. if conn.ssl_trustfile: raise SSLError("This version of Python does not support verification of the peer's certificate.") self.ssl = ssl(self.socket, keyfile=ssl_keyfile, certfile=ssl_certfile) self.socket.setblocking(1) def reading(self, reading): return reading def writing(self, writing): return writing def recv(self, n): return self.ssl.read(n) def send(self, s): return self.ssl.write(s) def close(self): self.socket.close() TRANSPORTS["ssl"] = old_ssl TRANSPORTS["tcp+tls"] = old_ssl else: class tls(SocketTransport): def __init__(self, conn, host, port): SocketTransport.__init__(self, conn, host, port) if conn.ssl_trustfile: validate = CERT_REQUIRED else: validate = CERT_NONE self.tls = wrap_socket(self.socket, keyfile=conn.ssl_keyfile, certfile=conn.ssl_certfile, ca_certs=conn.ssl_trustfile, cert_reqs=validate) if validate == CERT_REQUIRED and not conn.ssl_skip_hostname_check: match_found = False peer_cert = self.tls.getpeercert() if peer_cert: peer_names = [] if 'subjectAltName' in peer_cert: for san in peer_cert['subjectAltName']: if san[0] == 'DNS': peer_names.append(san[1].lower()) if 'subject' in peer_cert: for sub in peer_cert['subject']: while isinstance(sub, tuple) and isinstance(sub[0],tuple): sub = sub[0] # why the extra level of indirection??? 
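# the subject commonName is added to the candidate names alongside any
# subjectAltName DNS entries collected above; every candidate is then
# matched against the requested host via _match_dns_pattern() below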
if sub[0] == 'commonName': peer_names.append(sub[1].lower()) for pattern in peer_names: if _match_dns_pattern( host.lower(), pattern ): #print "Match found %s" % pattern match_found = True break if not match_found: raise SSLError("Connection hostname '%s' does not match names from peer certificate: %s" % (host, peer_names)) self.socket.setblocking(0) self.state = None def reading(self, reading): if self.state is None: return reading else: return self.state == SSL_ERROR_WANT_READ def writing(self, writing): if self.state is None: return writing else: return self.state == SSL_ERROR_WANT_WRITE def send(self, bytes): self._clear_state() try: return self.tls.write(bytes) except SSLError, e: if self._update_state(e.args[0]): return 0 else: raise def recv(self, n): self._clear_state() try: return self.tls.read(n) except SSLError, e: if self._update_state(e.args[0]): return None else: raise def _clear_state(self): self.state = None def _update_state(self, code): if code in (SSL_ERROR_WANT_READ, SSL_ERROR_WANT_WRITE): self.state = code return True else: return False def close(self): self.socket.setblocking(1) # this closes the underlying socket self.tls.close() def _match_dns_pattern( hostname, pattern ): """ For checking the hostnames provided by the peer's certificate """ if pattern.find("*") == -1: return hostname == pattern # DNS wildcarded pattern - see RFC2818 h_labels = hostname.split(".") p_labels = pattern.split(".") while h_labels and p_labels: if p_labels[0].find("*") == -1: if p_labels[0] != h_labels[0]: return False else: p = p_labels[0].split("*") if not h_labels[0].startswith(p[0]): return False if not h_labels[0].endswith(p[1]): return False h_labels.pop(0) p_labels.pop(0) return not h_labels and not p_labels TRANSPORTS["ssl"] = tls TRANSPORTS["tcp+tls"] = tls qpid-python-0.22/python/qpid/messaging/util.py0000644000175000017500000000414711765676242017601 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ Add-on utilities for the L{qpid.messaging} API. """ from qpid.messaging import * from logging import getLogger from threading import Thread log = getLogger("qpid.messaging.util") def auto_fetch_reconnect_urls(conn): ssn = conn.session("auto-fetch-reconnect-urls") rcv = ssn.receiver("amq.failover") rcv.capacity = 10 def main(): while True: try: msg = rcv.fetch() except LinkClosed: return set_reconnect_urls(conn, msg) ssn.acknowledge(msg, sync=False) thread = Thread(name="auto-fetch-reconnect-urls", target=main) thread.setDaemon(True) thread.start() def set_reconnect_urls(conn, msg): reconnect_urls = [] urls = msg.properties["amq.failover"] for u in urls: # FIXME aconway 2012-06-12: Nasty hack parsing of the C++ broker's URL format. 
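# the advertised failover URLs look roughly like
# "amqp:tcp:192.0.2.1:5672,tcp:192.0.2.2:5672" (illustrative); each
# comma-separated entry is reduced to "host:port" below, keeping any
# colons that belong to an IPv6 host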
if u.startswith("amqp:"): for a in u[5:].split(","): parts = a.split(":") # Handle IPv6 addresses which have : in the host part. port = parts[-1] # Last : separated field is port host = ":".join(parts[1:-1]) # First : separated field is protocol, host is the rest. reconnect_urls.append("%s:%s" % (host, port)) conn.reconnect_urls = reconnect_urls log.warn("set reconnect_urls for conn %s: %s", conn, reconnect_urls) __all__ = ["auto_fetch_reconnect_urls", "set_reconnect_urls"] qpid-python-0.22/python/qpid/messaging/__init__.py0000644000175000017500000000237211337320577020351 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ A candidate high level messaging API for python. Areas that still need work: - definition of the arguments for L{Session.sender} and L{Session.receiver} - standard L{Message} properties - L{Message} content encoding - protocol negotiation/multiprotocol impl """ from qpid.datatypes import timestamp, uuid4, Serial from qpid.messaging.constants import * from qpid.messaging.endpoints import * from qpid.messaging.exceptions import * from qpid.messaging.message import * qpid-python-0.22/python/qpid/messaging/exceptions.py0000644000175000017500000000604011427766065020775 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # class Timeout(Exception): pass ## Messaging Errors class MessagingError(Exception): def __init__(self, code=None, text=None, **info): self.code = code self.text = text self.info = info if self.code is None: msg = self.text else: msg = "%s(%s)" % (self.text, self.code) if info: msg += " " + ", ".join(["%s=%r" % (k, v) for k, v in self.info.items()]) Exception.__init__(self, msg) class InternalError(MessagingError): pass ## Connection Errors class ConnectionError(MessagingError): """ The base class for all connection related exceptions. """ pass class ConnectError(ConnectionError): """ Exception raised when there is an error connecting to the remote peer. 
""" pass class VersionError(ConnectError): pass class AuthenticationFailure(ConnectError): pass class ConnectionClosed(ConnectionError): pass class HeartbeatTimeout(ConnectionError): pass ## Session Errors class SessionError(MessagingError): pass class Detached(SessionError): """ Exception raised when an operation is attempted that is illegal when detached. """ pass class NontransactionalSession(SessionError): """ Exception raised when commit or rollback is attempted on a non transactional session. """ pass class TransactionError(SessionError): pass class TransactionAborted(TransactionError): pass class UnauthorizedAccess(SessionError): pass class ServerError(SessionError): pass class SessionClosed(SessionError): pass ## Link Errors class LinkError(MessagingError): pass class InsufficientCapacity(LinkError): pass class AddressError(LinkError): pass class MalformedAddress(AddressError): pass class InvalidOption(AddressError): pass class ResolutionError(AddressError): pass class AssertionFailed(ResolutionError): pass class NotFound(ResolutionError): pass class LinkClosed(LinkError): pass ## Sender Errors class SenderError(LinkError): pass class SendError(SenderError): pass class TargetCapacityExceeded(SendError): pass ## Receiver Errors class ReceiverError(LinkError): pass class FetchError(ReceiverError): pass class Empty(FetchError): """ Exception raised by L{Receiver.fetch} when there is no message available within the alloted time. """ pass qpid-python-0.22/python/qpid/messaging/endpoints.py0000644000175000017500000007373512123150406020612 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ A candidate high level messaging API for python. Areas that still need work: - definition of the arguments for L{Session.sender} and L{Session.receiver} - standard L{Message} properties - L{Message} content encoding - protocol negotiation/multiprotocol impl """ from logging import getLogger from math import ceil from qpid.codec010 import StringCodec from qpid.concurrency import synchronized, Waiter, Condition from qpid.datatypes import Serial, uuid4 from qpid.messaging.constants import * from qpid.messaging.exceptions import * from qpid.messaging.message import * from qpid.ops import PRIMITIVE from qpid.util import default, URL from threading import Thread, RLock log = getLogger("qpid.messaging") static = staticmethod class Endpoint: def _ecwait(self, predicate, timeout=None): result = self._ewait(lambda: self.closed or predicate(), timeout) self.check_closed() return result class Connection(Endpoint): """ A Connection manages a group of L{Sessions} and connects them with a remote endpoint. """ @static def establish(url=None, **options): """ Constructs a L{Connection} with the supplied parameters and opens it. 
""" conn = Connection(url, **options) conn.open() return conn def __init__(self, url=None, **options): """ Creates a connection. A newly created connection must be connected with the Connection.connect() method before it can be used. @type url: str @param url: [ [ / ] @ ] [ : ] @type host: str @param host: the name or ip address of the remote host (overriden by url) @type port: int @param port: the port number of the remote host (overriden by url) @type transport: str @param transport: one of tcp, tcp+tls, or ssl (alias for tcp+tls) @type heartbeat: int @param heartbeat: heartbeat interval in seconds @type username: str @param username: the username for authentication (overriden by url) @type password: str @param password: the password for authentication (overriden by url) @type sasl_mechanisms: str @param sasl_mechanisms: space separated list of permitted sasl mechanisms @type sasl_service: str @param sasl_service: ??? @type sasl_min_ssf: ??? @param sasl_min_ssf: ??? @type sasl_max_ssf: ??? @param sasl_max_ssf: ??? @type reconnect: bool @param reconnect: enable/disable automatic reconnect @type reconnect_timeout: float @param reconnect_timeout: total time to attempt reconnect @type reconnect_internal_min: float @param reconnect_internal_min: minimum interval between reconnect attempts @type reconnect_internal_max: float @param reconnect_internal_max: maximum interval between reconnect attempts @type reconnect_internal: float @param reconnect_interval: set both min and max reconnect intervals @type reconnect_limit: int @param reconnect_limit: limit the total number of reconnect attempts @type reconnect_urls: list[str] @param reconnect_urls: list of backup hosts specified as urls @type address_ttl: float @param address_ttl: time until cached address resolution expires @type ssl_keyfile: str @param ssl_keyfile: file with client's private key (PEM format) @type ssl_certfile: str @param ssl_certfile: file with client's public (eventually priv+pub) key (PEM format) @type ssl_trustfile: str @param ssl_trustfile: file trusted certificates to validate the server @type ssl_skip_hostname_check: bool @param ssl_skip_hostname_check: disable verification of hostname in certificate. Use with caution - disabling hostname checking leaves you vulnerable to Man-in-the-Middle attacks. 
@rtype: Connection @return: a disconnected Connection """ if url is None: url = options.get("host") if isinstance(url, basestring): url = URL(url) self.host = url.host if options.has_key("transport"): self.transport = options.get("transport") elif url.scheme == url.AMQP: self.transport = "tcp" elif url.scheme == url.AMQPS: self.transport = "ssl" else: self.transport = "tcp" if self.transport in ("ssl", "tcp+tls"): self.port = default(url.port, options.get("port", AMQPS_PORT)) else: self.port = default(url.port, options.get("port", AMQP_PORT)) self.heartbeat = options.get("heartbeat") self.username = default(url.user, options.get("username", None)) self.password = default(url.password, options.get("password", None)) self.auth_username = None self.sasl_mechanisms = options.get("sasl_mechanisms") self.sasl_service = options.get("sasl_service", "qpidd") self.sasl_min_ssf = options.get("sasl_min_ssf") self.sasl_max_ssf = options.get("sasl_max_ssf") self.reconnect = options.get("reconnect", False) self.reconnect_timeout = options.get("reconnect_timeout") reconnect_interval = options.get("reconnect_interval") self.reconnect_interval_min = options.get("reconnect_interval_min", default(reconnect_interval, 1)) self.reconnect_interval_max = options.get("reconnect_interval_max", default(reconnect_interval, 2*60)) self.reconnect_limit = options.get("reconnect_limit") self.reconnect_urls = options.get("reconnect_urls", []) self.reconnect_log = options.get("reconnect_log", True) self.address_ttl = options.get("address_ttl", 60) self.tcp_nodelay = options.get("tcp_nodelay", False) self.ssl_keyfile = options.get("ssl_keyfile", None) self.ssl_certfile = options.get("ssl_certfile", None) self.ssl_trustfile = options.get("ssl_trustfile", None) self.ssl_skip_hostname_check = options.get("ssl_skip_hostname_check", False) self.client_properties = options.get("client_properties", {}) self.options = options self.id = str(uuid4()) self.session_counter = 0 self.sessions = {} self._open = False self._connected = False self._transport_connected = False self._lock = RLock() self._condition = Condition(self._lock) self._waiter = Waiter(self._condition) self._modcount = Serial(0) self.error = None from driver import Driver self._driver = Driver(self) def _wait(self, predicate, timeout=None): return self._waiter.wait(predicate, timeout=timeout) def _wakeup(self): self._modcount += 1 self._driver.wakeup() def check_error(self): if self.error: self._condition.gc() raise self.error def get_error(self): return self.error def _ewait(self, predicate, timeout=None): result = self._wait(lambda: self.error or predicate(), timeout) self.check_error() return result def check_closed(self): if not self._connected: self._condition.gc() raise ConnectionClosed() @synchronized def session(self, name=None, transactional=False): """ Creates or retrieves the named session. If the name is omitted or None, then a unique name is chosen based on a randomly generated uuid. @type name: str @param name: the session name @rtype: Session @return: the named Session """ if name is None: name = "%s:%s" % (self.id, self.session_counter) self.session_counter += 1 else: name = "%s:%s" % (self.id, name) if self.sessions.has_key(name): return self.sessions[name] else: ssn = Session(self, name, transactional) self.sessions[name] = ssn self._wakeup() return ssn @synchronized def _remove_session(self, ssn): self.sessions.pop(ssn.name, 0) @synchronized def open(self): """ Opens a connection. 
""" if self._open: raise ConnectionError("already open") self._open = True self.attach() @synchronized def opened(self): """ Return true if the connection is open, false otherwise. """ return self._open @synchronized def attach(self): """ Attach to the remote endpoint. """ if not self._connected: self._connected = True self._driver.start() self._wakeup() self._ewait(lambda: self._transport_connected and not self._unlinked()) def _unlinked(self): return [l for ssn in self.sessions.values() if not (ssn.error or ssn.closed) for l in ssn.senders + ssn.receivers if not (l.linked or l.error or l.closed)] @synchronized def detach(self, timeout=None): """ Detach from the remote endpoint. """ if self._connected: self._connected = False self._wakeup() cleanup = True else: cleanup = False try: if not self._wait(lambda: not self._transport_connected, timeout=timeout): raise Timeout("detach timed out") finally: if cleanup: self._driver.stop() self._condition.gc() @synchronized def attached(self): """ Return true if the connection is attached, false otherwise. """ return self._connected @synchronized def close(self, timeout=None): """ Close the connection and all sessions. """ try: for ssn in self.sessions.values(): ssn.close(timeout=timeout) finally: self.detach(timeout=timeout) self._open = False class Session(Endpoint): """ Sessions provide a linear context for sending and receiving L{Messages}. L{Messages} are sent and received using the L{Sender.send} and L{Receiver.fetch} methods of the L{Sender} and L{Receiver} objects associated with a Session. Each L{Sender} and L{Receiver} is created by supplying either a target or source address to the L{sender} and L{receiver} methods of the Session. The address is supplied via a string syntax documented below. Addresses ========= An address identifies a source or target for messages. In its simplest form this is just a name. In general a target address may also be used as a source address, however not all source addresses may be used as a target, e.g. a source might additionally have some filtering criteria that would not be present in a target. A subject may optionally be specified along with the name. When an address is used as a target, any subject specified in the address is used as the default subject of outgoing messages for that target. When an address is used as a source, any subject specified in the address is pattern matched against the subject of available messages as a filter for incoming messages from that source. The options map contains additional information about the address including: - policies for automatically creating, and deleting the node to which an address refers - policies for asserting facts about the node to which an address refers - extension points that can be used for sender/receiver configuration Mapping to AMQP 0-10 -------------------- The name is resolved to either an exchange or a queue by querying the broker. The subject is set as a property on the message. Additionally, if the name refers to an exchange, the routing key is set to the subject. Syntax ------ The following regular expressions define the tokens used to parse addresses:: LBRACE: \\{ RBRACE: \\} LBRACK: \\[ RBRACK: \\] COLON: : SEMI: ; SLASH: / COMMA: , NUMBER: [+-]?[0-9]*\\.?[0-9]+ ID: [a-zA-Z_](?:[a-zA-Z0-9_-]*[a-zA-Z0-9_])? 
STRING: "(?:[^\\\\"]|\\\\.)*"|\'(?:[^\\\\\']|\\\\.)*\' ESC: \\\\[^ux]|\\\\x[0-9a-fA-F][0-9a-fA-F]|\\\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F] SYM: [.#*%@$^!+-] WSPACE: [ \\n\\r\\t]+ The formal grammar for addresses is given below:: address = name [ "/" subject ] [ ";" options ] name = ( part | quoted )+ subject = ( part | quoted | "/" )* quoted = STRING / ESC part = LBRACE / RBRACE / COLON / COMMA / NUMBER / ID / SYM options = map map = "{" ( keyval ( "," keyval )* )? "}" keyval = ID ":" value value = NUMBER / STRING / ID / map / list list = "[" ( value ( "," value )* )? "]" This grammar resuls in the following informal syntax:: [ / ] [ ; ] Where options is:: { : , ... } And values may be: - numbers - single, double, or non quoted strings - maps (dictionaries) - lists Options ------- The options map permits the following parameters:: [ / ] ; { create: always | sender | receiver | never, delete: always | sender | receiver | never, assert: always | sender | receiver | never, mode: browse | consume, node: { type: queue | topic, durable: True | False, x-declare: { ... ... }, x-bindings: [, ... ] }, link: { name: , durable: True | False, reliability: unreliable | at-most-once | at-least-once | exactly-once, x-declare: { ... ... }, x-bindings: [, ... ], x-subscribe: { ... ... } } } Bindings are specified as a map with the following options:: { exchange: , queue: , key: , arguments: } The create, delete, and assert policies specify who should perfom the associated action: - I{always}: the action will always be performed - I{sender}: the action will only be performed by the sender - I{receiver}: the action will only be performed by the receiver - I{never}: the action will never be performed (this is the default) The node-type is one of: - I{topic}: a topic node will default to the topic exchange, x-declare may be used to specify other exchange types - I{queue}: this is the default node-type The x-declare map permits protocol specific keys and values to be specified when exchanges or queues are declared. These keys and values are passed through when creating a node or asserting facts about an existing node. Examples -------- A simple name resolves to any named node, usually a queue or a topic:: my-queue-or-topic A simple name with a subject will also resolve to a node, but the presence of the subject will cause a sender using this address to set the subject on outgoing messages, and receivers to filter based on the subject:: my-queue-or-topic/my-subject A subject pattern can be used and will cause filtering if used by the receiver. If used for a sender, the literal value gets set as the subject:: my-queue-or-topic/my-* In all the above cases, the address is resolved to an existing node. If you want the node to be auto-created, then you can do the following. 
By default nonexistent nodes are assumed to be queues:: my-queue; {create: always} You can customize the properties of the queue:: my-queue; {create: always, node: {durable: True}} You can create a topic instead if you want:: my-queue; {create: always, node: {type: topic}} You can assert that the address resolves to a node with particular properties:: my-transient-topic; { assert: always, node: { type: topic, durable: False } } """ def __init__(self, connection, name, transactional): self.connection = connection self.name = name self.log_id = "%x" % id(self) self.transactional = transactional self.committing = False self.committed = True self.aborting = False self.aborted = False self.next_sender_id = 0 self.senders = [] self.next_receiver_id = 0 self.receivers = [] self.outgoing = [] self.incoming = [] self.unacked = [] self.acked = [] # XXX: I hate this name. self.ack_capacity = UNLIMITED self.error = None self.closing = False self.closed = False self._lock = connection._lock def __repr__(self): return "" % self.name def _wait(self, predicate, timeout=None): return self.connection._wait(predicate, timeout=timeout) def _wakeup(self): self.connection._wakeup() def check_error(self): self.connection.check_error() if self.error: raise self.error def get_error(self): err = self.connection.get_error() if err: return err else: return self.error def _ewait(self, predicate, timeout=None): result = self.connection._ewait(lambda: self.error or predicate(), timeout) self.check_error() return result def check_closed(self): if self.closed: raise SessionClosed() @synchronized def sender(self, target, **options): """ Creates a L{Sender} that may be used to send L{Messages} to the specified target. @type target: str @param target: the target to which messages will be sent @rtype: Sender @return: a new Sender for the specified target """ target = _mangle(target) sender = Sender(self, self.next_sender_id, target, options) self.next_sender_id += 1 self.senders.append(sender) if not self.closed and self.connection._connected: self._wakeup() try: sender._ewait(lambda: sender.linked) except LinkError, e: sender.close() raise e return sender @synchronized def receiver(self, source, **options): """ Creates a receiver that may be used to fetch L{Messages} from the specified source. 
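For example (the address shown is illustrative)::

      rcv = session.receiver("my-queue; {create: receiver}")
      msg = rcv.fetch(timeout=60)
      session.acknowledge(msg)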
@type source: str @param source: the source of L{Messages} @rtype: Receiver @return: a new Receiver for the specified source """ source = _mangle(source) receiver = Receiver(self, self.next_receiver_id, source, options) self.next_receiver_id += 1 self.receivers.append(receiver) if not self.closed and self.connection._connected: self._wakeup() try: receiver._ewait(lambda: receiver.linked) except LinkError, e: receiver.close() raise e return receiver @synchronized def _count(self, predicate): result = 0 for msg in self.incoming: if predicate(msg): result += 1 return result def _peek(self, receiver): for msg in self.incoming: if msg._receiver == receiver: return msg def _pop(self, receiver): i = 0 while i < len(self.incoming): msg = self.incoming[i] if msg._receiver == receiver: del self.incoming[i] return msg else: i += 1 @synchronized def _get(self, receiver, timeout=None): if self._ewait(lambda: ((self._peek(receiver) is not None) or self.closing or receiver.closed), timeout): msg = self._pop(receiver) if msg is not None: msg._receiver.returned += 1 self.unacked.append(msg) log.debug("RETR[%s]: %s", self.log_id, msg) return msg return None @synchronized def next_receiver(self, timeout=None): if self._ecwait(lambda: self.incoming, timeout): return self.incoming[0]._receiver else: raise Empty @synchronized def acknowledge(self, message=None, disposition=None, sync=True): """ Acknowledge the given L{Message}. If message is None, then all unacknowledged messages on the session are acknowledged. @type message: Message @param message: the message to acknowledge or None @type sync: boolean @param sync: if true then block until the message(s) are acknowledged """ if message is None: messages = self.unacked[:] else: messages = [message] for m in messages: if self.ack_capacity is not UNLIMITED: if self.ack_capacity <= 0: # XXX: this is currently a SendError, maybe it should be a SessionError? raise InsufficientCapacity("ack_capacity = %s" % self.ack_capacity) self._wakeup() self._ecwait(lambda: len(self.acked) < self.ack_capacity) m._disposition = disposition self.unacked.remove(m) self.acked.append(m) self._wakeup() if sync: self._ecwait(lambda: not [m for m in messages if m in self.acked]) @synchronized def commit(self): """ Commit outstanding transactional work. This consists of all message sends and receives since the prior commit or rollback. """ if not self.transactional: raise NontransactionalSession() self.committing = True self._wakeup() self._ecwait(lambda: not self.committing) if self.aborted: raise TransactionAborted() assert self.committed @synchronized def rollback(self): """ Rollback outstanding transactional work. This consists of all message sends and receives since the prior commit or rollback. """ if not self.transactional: raise NontransactionalSession() self.aborting = True self._wakeup() self._ecwait(lambda: not self.aborting) assert self.aborted @synchronized def sync(self, timeout=None): """ Sync the session. """ for snd in self.senders: snd.sync(timeout=timeout) if not self._ewait(lambda: not self.outgoing and not self.acked, timeout=timeout): raise Timeout("session sync timed out") @synchronized def close(self, timeout=None): """ Close the session. 
""" self.sync(timeout=timeout) for link in self.receivers + self.senders: link.close(timeout=timeout) if not self.closing: self.closing = True self._wakeup() try: if not self._ewait(lambda: self.closed, timeout=timeout): raise Timeout("session close timed out") finally: self.connection._remove_session(self) def _mangle(addr): if addr and addr.startswith("#"): return str(uuid4()) + addr else: return addr class Sender(Endpoint): """ Sends outgoing messages. """ def __init__(self, session, id, target, options): self.session = session self.id = id self.target = target self.options = options self.capacity = options.get("capacity", UNLIMITED) self.threshold = 0.5 self.durable = options.get("durable") self.queued = Serial(0) self.synced = Serial(0) self.acked = Serial(0) self.error = None self.linked = False self.closing = False self.closed = False self._lock = self.session._lock def _wakeup(self): self.session._wakeup() def check_error(self): self.session.check_error() if self.error: raise self.error def get_error(self): err = self.session.get_error() if err: return err else: return self.error def _ewait(self, predicate, timeout=None): result = self.session._ewait(lambda: self.error or predicate(), timeout) self.check_error() return result def check_closed(self): if self.closed: raise LinkClosed() @synchronized def unsettled(self): """ Returns the number of messages awaiting acknowledgment. @rtype: int @return: the number of unacknowledged messages """ return self.queued - self.acked @synchronized def available(self): if self.capacity is UNLIMITED: return UNLIMITED else: return self.capacity - self.unsettled() @synchronized def send(self, object, sync=True, timeout=None): """ Send a message. If the object passed in is of type L{unicode}, L{str}, L{list}, or L{dict}, it will automatically be wrapped in a L{Message} and sent. If it is of type L{Message}, it will be sent directly. If the sender capacity is not L{UNLIMITED} then send will block until there is available capacity to send the message. If the timeout parameter is specified, then send will throw an L{InsufficientCapacity} exception if capacity does not become available within the specified time. @type object: unicode, str, list, dict, Message @param object: the message or content to send @type sync: boolean @param sync: if true then block until the message is sent @type timeout: float @param timeout: the time to wait for available capacity """ if not self.session.connection._connected or self.session.closing: raise Detached() self._ecwait(lambda: self.linked) if isinstance(object, Message): message = object else: message = Message(object) if message.durable is None: message.durable = self.durable if self.capacity is not UNLIMITED: if self.capacity <= 0: raise InsufficientCapacity("capacity = %s" % self.capacity) if not self._ecwait(self.available, timeout=timeout): raise InsufficientCapacity("capacity = %s" % self.capacity) # XXX: what if we send the same message to multiple senders? 
message._sender = self if self.capacity is not UNLIMITED: message._sync = sync or self.available() <= int(ceil(self.threshold*self.capacity)) else: message._sync = sync self.session.outgoing.append(message) self.queued += 1 if sync: self.sync(timeout=timeout) assert message not in self.session.outgoing else: self._wakeup() @synchronized def sync(self, timeout=None): mno = self.queued if self.synced < mno: self.synced = mno self._wakeup() if not self._ewait(lambda: self.acked >= mno, timeout=timeout): raise Timeout("sender sync timed out") @synchronized def close(self, timeout=None): """ Close the Sender. """ # avoid erroring out when closing a sender that was never # established if self.acked < self.queued: self.sync(timeout=timeout) if not self.closing: self.closing = True self._wakeup() try: if not self.session._ewait(lambda: self.closed, timeout=timeout): raise Timeout("sender close timed out") finally: try: self.session.senders.remove(self) except ValueError: pass class Receiver(Endpoint, object): """ Receives incoming messages from a remote source. Messages may be fetched with L{fetch}. """ def __init__(self, session, id, source, options): self.session = session self.id = id self.source = source self.options = options self.granted = Serial(0) self.draining = False self.impending = Serial(0) self.received = Serial(0) self.returned = Serial(0) self.error = None self.linked = False self.closing = False self.closed = False self._lock = self.session._lock self._capacity = 0 self._set_capacity(options.get("capacity", 0), False) self.threshold = 0.5 @synchronized def _set_capacity(self, c, wakeup=True): if c is UNLIMITED: self._capacity = c.value else: self._capacity = c self._grant() if wakeup: self._wakeup() def _get_capacity(self): if self._capacity == UNLIMITED.value: return UNLIMITED else: return self._capacity capacity = property(_get_capacity, _set_capacity) def _wakeup(self): self.session._wakeup() def check_error(self): self.session.check_error() if self.error: raise self.error def get_error(self): err = self.session.get_error() if err: return err else: return self.error def _ewait(self, predicate, timeout=None): result = self.session._ewait(lambda: self.error or predicate(), timeout) self.check_error() return result def check_closed(self): if self.closed: raise LinkClosed() @synchronized def unsettled(self): """ Returns the number of acknowledged messages awaiting confirmation. """ return len([m for m in self.acked if m._receiver is self]) @synchronized def available(self): """ Returns the number of messages available to be fetched by the application. @rtype: int @return: the number of available messages """ return self.received - self.returned @synchronized def fetch(self, timeout=None): """ Fetch and return a single message. A timeout of None will block forever waiting for a message to arrive, a timeout of zero will return immediately if no messages are available. 
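    A receiver with a prefetch window can drain a queue like this
    (illustrative sketch only; Empty is assumed to be importable from
    qpid.messaging)::

      from qpid.messaging import Empty

      rcv = ssn.receiver("my-queue", capacity=100)  # prefetch up to 100
      while True:
        try:
          msg = rcv.fetch(timeout=1)
        except Empty:
          break
        print msg.content
      ssn.acknowledge()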
@type timeout: float @param timeout: the time to wait for a message to be available """ self._ecwait(lambda: self.linked) if self._capacity == 0: self.granted = self.returned + 1 self._wakeup() self._ecwait(lambda: self.impending >= self.granted) msg = self.session._get(self, timeout=timeout) if msg is None: self.check_closed() self.draining = True self._wakeup() self._ecwait(lambda: not self.draining) msg = self.session._get(self, timeout=0) self._grant() self._wakeup() if msg is None: raise Empty() elif self._capacity not in (0, UNLIMITED.value): t = int(ceil(self.threshold * self._capacity)) if self.received - self.returned <= t: self.granted = self.returned + self._capacity self._wakeup() return msg def _grant(self): if self._capacity == UNLIMITED.value: self.granted = UNLIMITED else: self.granted = self.returned + self._capacity @synchronized def close(self, timeout=None): """ Close the receiver. """ if not self.closing: self.closing = True self._wakeup() try: if not self.session._ewait(lambda: self.closed, timeout=timeout): raise Timeout("receiver close timed out") finally: try: self.session.receivers.remove(self) except ValueError: pass __all__ = ["Connection", "Session", "Sender", "Receiver"] qpid-python-0.22/python/qpid/messaging/constants.py0000644000175000017500000000222311346031515020610 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # __SELF__ = object() class Constant: def __init__(self, name, value=__SELF__): self.name = name if value is __SELF__: self.value = self else: self.value = value def __repr__(self): return self.name AMQP_PORT = 5672 AMQPS_PORT = 5671 UNLIMITED = Constant("UNLIMITED", 0xFFFFFFFFL) REJECTED = Constant("REJECTED") RELEASED = Constant("RELEASED") qpid-python-0.22/python/qpid/messaging/message.py0000644000175000017500000001160211416615777020241 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
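# The REJECTED and RELEASED constants defined in constants.py above are
# normally paired with the Disposition class defined further down in this
# module when acknowledging a message, e.g. to hand a message back to the
# broker instead of consuming it.  Illustrative sketch only (assumes an open
# session "ssn" and a previously fetched message "msg"):
#
#   from qpid.messaging import Disposition, RELEASED
#
#   ssn.acknowledge(msg, Disposition(RELEASED))  # broker may redeliver it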
# from qpid.codec010 import StringCodec from qpid.ops import PRIMITIVE def codec(name): type = PRIMITIVE[name] def encode(x): sc = StringCodec() sc.write_primitive(type, x) return sc.encoded def decode(x): sc = StringCodec(x) return sc.read_primitive(type) return encode, decode # XXX: need to correctly parse the mime type and deal with # content-encoding header TYPE_MAPPINGS={ dict: "amqp/map", list: "amqp/list", unicode: "text/plain; charset=utf8", unicode: "text/plain", buffer: None, str: None, None.__class__: None } DEFAULT_CODEC = (lambda x: x, lambda x: x) def encode_text_plain(x): if x is None: return None else: return x.encode("utf8") def decode_text_plain(x): if x is None: return None else: return x.decode("utf8") TYPE_CODEC={ "amqp/map": codec("map"), "amqp/list": codec("list"), "text/plain; charset=utf8": (encode_text_plain, decode_text_plain), "text/plain": (encode_text_plain, decode_text_plain), "": DEFAULT_CODEC, None: DEFAULT_CODEC } def get_type(content): return TYPE_MAPPINGS[content.__class__] def get_codec(content_type): return TYPE_CODEC.get(content_type, DEFAULT_CODEC) UNSPECIFIED = object() class Message: """ A message consists of a standard set of fields, an application defined set of properties, and some content. @type id: str @ivar id: the message id @type subject: str @ivar subject: message subject @type user_id: str @ivar user_id: the user-id of the message producer @type reply_to: str @ivar reply_to: the address to send replies @type correlation_id: str @ivar correlation_id: a correlation-id for the message @type durable: bool @ivar durable: message durability @type priority: int @ivar priority: message priority @type ttl: float @ivar ttl: time-to-live measured in seconds @type properties: dict @ivar properties: application specific message properties @type content_type: str @ivar content_type: the content-type of the message @type content: str, unicode, buffer, dict, list @ivar content: the message content """ def __init__(self, content=None, content_type=UNSPECIFIED, id=None, subject=None, user_id=None, reply_to=None, correlation_id=None, durable=None, priority=None, ttl=None, properties=None): """ Construct a new message with the supplied content. The content-type of the message will be automatically inferred from type of the content parameter. 
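    For example (the inferred types follow TYPE_MAPPINGS above; sketch
    only)::

      Message("some bytes")                      # content-type None
      Message(u"some text")                      # text/plain; charset=utf8
      Message({"k": 1}, durable=True, ttl=30)    # amqp/map, persistent, 30s TTL
      Message([1, 2, 3], subject="news",
              properties={"colour": "red"})      # amqp/list plus app properties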
@type content: str, unicode, buffer, dict, list @param content: the message content @type content_type: str @param content_type: the content-type of the message """ self.id = id self.subject = subject self.user_id = user_id self.reply_to = reply_to self.correlation_id = correlation_id self.durable = durable self.priority = priority self.ttl = ttl self.redelivered = False if properties is None: self.properties = {} else: self.properties = properties if content_type is UNSPECIFIED: self.content_type = get_type(content) else: self.content_type = content_type self.content = content def __repr__(self): args = [] for name in ["id", "subject", "user_id", "reply_to", "correlation_id", "priority", "ttl"]: value = self.__dict__[name] if value is not None: args.append("%s=%r" % (name, value)) for name in ["durable", "redelivered", "properties"]: value = self.__dict__[name] if value: args.append("%s=%r" % (name, value)) if self.content_type != get_type(self.content): args.append("content_type=%r" % self.content_type) if self.content is not None: if args: args.append("content=%r" % self.content) else: args.append(repr(self.content)) return "Message(%s)" % ", ".join(args) class Disposition: def __init__(self, type, **options): self.type = type self.options = options def __repr__(self): args = [str(self.type)] + \ ["%s=%r" % (k, v) for k, v in self.options.items()] return "Disposition(%s)" % ", ".join(args) __all__ = ["Message", "Disposition"] qpid-python-0.22/python/qpid/messaging/driver.py0000644000175000017500000011544312046451714020105 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
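# The codec helpers from message.py above (get_type/get_codec) are what the
# driver below uses, via get_codec, to encode outgoing content and decode
# incoming payloads; they can be exercised on their own (minimal sketch, not
# part of the original module):
#
#   from qpid.messaging.message import get_type, get_codec
#
#   content = {"answer": 42}
#   enc, dec = get_codec(get_type(content))   # codec for "amqp/map"
#   wire = enc(content)                       # AMQP 0-10 encoded map
#   roundtrip = dec(wire)                     # decodes to an equivalent dict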
# import socket, struct, sys, time from logging import getLogger, DEBUG from qpid import compat from qpid import sasl from qpid.concurrency import synchronized from qpid.datatypes import RangedSet, Serial from qpid.framing import OpEncoder, SegmentEncoder, FrameEncoder, \ FrameDecoder, SegmentDecoder, OpDecoder from qpid.messaging import address, transports from qpid.messaging.constants import UNLIMITED, REJECTED, RELEASED from qpid.messaging.exceptions import * from qpid.messaging.message import get_codec, Disposition, Message from qpid.ops import * from qpid.selector import Selector from qpid.util import URL, default,get_client_properties_with_defaults from qpid.validator import And, Context, List, Map, Types, Values from threading import Condition, Thread log = getLogger("qpid.messaging") rawlog = getLogger("qpid.messaging.io.raw") opslog = getLogger("qpid.messaging.io.ops") def addr2reply_to(addr): name, subject, options = address.parse(addr) if options: type = options.get("node", {}).get("type") else: type = None if type == "topic": return ReplyTo(name, subject) else: return ReplyTo(None, name) def reply_to2addr(reply_to): if reply_to.exchange in (None, ""): return reply_to.routing_key elif reply_to.routing_key is None: return "%s; {node: {type: topic}}" % reply_to.exchange else: return "%s/%s; {node: {type: topic}}" % (reply_to.exchange, reply_to.routing_key) class Attachment: def __init__(self, target): self.target = target # XXX DURABLE_DEFAULT=False # XXX class Pattern: """ The pattern filter matches the supplied wildcard pattern against a message subject. """ def __init__(self, value): self.value = value # XXX: this should become part of the driver def _bind(self, sst, exchange, queue): from qpid.ops import ExchangeBind sst.write_cmd(ExchangeBind(exchange=exchange, queue=queue, binding_key=self.value.replace("*", "#"))) SUBJECT_DEFAULTS = { "topic": "#" } def noop(): pass def sync_noop(): pass class SessionState: def __init__(self, driver, session, name, channel): self.driver = driver self.session = session self.name = name self.channel = channel self.detached = False self.committing = False self.aborting = False # sender state self.sent = Serial(0) self.acknowledged = RangedSet() self.actions = {} self.min_completion = self.sent self.max_completion = self.sent self.results = {} self.need_sync = False # receiver state self.received = None self.executed = RangedSet() # XXX: need to periodically exchange completion/known_completion self.destinations = {} def write_query(self, query, handler): id = self.sent self.write_cmd(query, lambda: handler(self.results.pop(id))) def apply_overrides(self, cmd, overrides): for k, v in overrides.items(): cmd[k.replace('-', '_')] = v def write_cmd(self, cmd, action=noop, overrides=None, sync=True): if overrides: self.apply_overrides(cmd, overrides) if action != noop: cmd.sync = sync if self.detached: raise Exception("detached") cmd.id = self.sent self.sent += 1 self.actions[cmd.id] = action self.max_completion = cmd.id self.write_op(cmd) self.need_sync = not cmd.sync def write_cmds(self, cmds, action=noop): if cmds: for cmd in cmds[:-1]: self.write_cmd(cmd) self.write_cmd(cmds[-1], action) else: action() def write_op(self, op): op.channel = self.channel self.driver.write_op(op) POLICIES = Values("always", "sender", "receiver", "never") RELIABILITY = Values("unreliable", "at-most-once", "at-least-once", "exactly-once") DECLARE = Map({}, restricted=False) BINDINGS = List(Map({ "exchange": Types(basestring), "queue": Types(basestring), "key": 
Types(basestring), "arguments": Map({}, restricted=False) })) COMMON_OPTS = { "create": POLICIES, "delete": POLICIES, "assert": POLICIES, "node": Map({ "type": Values("queue", "topic"), "durable": Types(bool), "x-declare": DECLARE, "x-bindings": BINDINGS }), "link": Map({ "name": Types(basestring), "durable": Types(bool), "reliability": RELIABILITY, "x-declare": DECLARE, "x-bindings": BINDINGS, "x-subscribe": Map({}, restricted=False) }) } RECEIVE_MODES = Values("browse", "consume") SOURCE_OPTS = COMMON_OPTS.copy() SOURCE_OPTS.update({ "mode": RECEIVE_MODES }) TARGET_OPTS = COMMON_OPTS.copy() class LinkIn: ADDR_NAME = "source" DIR_NAME = "receiver" VALIDATOR = Map(SOURCE_OPTS) def init_link(self, sst, rcv, _rcv): _rcv.destination = str(rcv.id) sst.destinations[_rcv.destination] = _rcv _rcv.draining = False _rcv.bytes_open = False _rcv.on_unlink = [] def do_link(self, sst, rcv, _rcv, type, subtype, action): link_opts = _rcv.options.get("link", {}) if type == "topic": default_reliability = "unreliable" else: default_reliability = "at-least-once" reliability = link_opts.get("reliability", default_reliability) declare = link_opts.get("x-declare", {}) subscribe = link_opts.get("x-subscribe", {}) acq_mode = acquire_mode.pre_acquired if reliability in ("unreliable", "at-most-once"): rcv._accept_mode = accept_mode.none else: rcv._accept_mode = accept_mode.explicit if type == "topic": default_name = "%s.%s" % (rcv.session.name, _rcv.destination) _rcv._queue = link_opts.get("name", default_name) sst.write_cmd(QueueDeclare(queue=_rcv._queue, durable=link_opts.get("durable", False), exclusive=True, auto_delete=(reliability == "unreliable")), overrides=declare) _rcv.on_unlink = [QueueDelete(_rcv._queue)] subject = _rcv.subject or SUBJECT_DEFAULTS.get(subtype) bindings = get_bindings(link_opts, _rcv._queue, _rcv.name, subject) if not bindings: sst.write_cmd(ExchangeBind(_rcv._queue, _rcv.name, subject)) elif type == "queue": _rcv._queue = _rcv.name if _rcv.options.get("mode", "consume") == "browse": acq_mode = acquire_mode.not_acquired bindings = get_bindings(link_opts, queue=_rcv._queue) sst.write_cmds(bindings) sst.write_cmd(MessageSubscribe(queue=_rcv._queue, destination=_rcv.destination, acquire_mode = acq_mode, accept_mode = rcv._accept_mode), overrides=subscribe) sst.write_cmd(MessageSetFlowMode(_rcv.destination, flow_mode.credit), action) def do_unlink(self, sst, rcv, _rcv, action=noop): link_opts = _rcv.options.get("link", {}) reliability = link_opts.get("reliability") cmds = [MessageCancel(_rcv.destination)] cmds.extend(_rcv.on_unlink) sst.write_cmds(cmds, action) def del_link(self, sst, rcv, _rcv): del sst.destinations[_rcv.destination] class LinkOut: ADDR_NAME = "target" DIR_NAME = "sender" VALIDATOR = Map(TARGET_OPTS) def init_link(self, sst, snd, _snd): _snd.closing = False _snd.pre_ack = False def do_link(self, sst, snd, _snd, type, subtype, action): link_opts = _snd.options.get("link", {}) reliability = link_opts.get("reliability", "at-least-once") _snd.pre_ack = reliability in ("unreliable", "at-most-once") if type == "topic": _snd._exchange = _snd.name _snd._routing_key = _snd.subject bindings = get_bindings(link_opts, exchange=_snd.name, key=_snd.subject) elif type == "queue": _snd._exchange = "" _snd._routing_key = _snd.name bindings = get_bindings(link_opts, queue=_snd.name) sst.write_cmds(bindings, action) def do_unlink(self, sst, snd, _snd, action=noop): action() def del_link(self, sst, snd, _snd): pass class Cache: def __init__(self, ttl): self.ttl = ttl self.entries = {} def 
__setitem__(self, key, value): self.entries[key] = time.time(), value def __getitem__(self, key): tstamp, value = self.entries[key] if time.time() - tstamp >= self.ttl: del self.entries[key] raise KeyError(key) else: return value def __delitem__(self, key): del self.entries[key] # XXX HEADER="!4s4B" EMPTY_DP = DeliveryProperties() EMPTY_MP = MessageProperties() SUBJECT = "qpid.subject" CLOSED = "CLOSED" READ_ONLY = "READ_ONLY" WRITE_ONLY = "WRITE_ONLY" OPEN = "OPEN" class Driver: def __init__(self, connection): self.connection = connection self.log_id = "%x" % id(self.connection) self._lock = self.connection._lock self._selector = Selector.default() self._attempts = 0 self._delay = self.connection.reconnect_interval_min self._reconnect_log = self.connection.reconnect_log self._host = 0 self._retrying = False self._next_retry = None self._transport = None self._timeout = None self.engine = None def _next_host(self): urls = [URL(u) for u in self.connection.reconnect_urls] hosts = [(self.connection.host, default(self.connection.port, 5672))] + \ [(u.host, default(u.port, 5672)) for u in urls] if self._host >= len(hosts): self._host = 0 result = hosts[self._host] if self._host == 0: self._attempts += 1 self._host = self._host + 1 return result def _num_hosts(self): return len(self.connection.reconnect_urls) + 1 @synchronized def wakeup(self): self.dispatch() self._selector.wakeup() def start(self): self._selector.register(self) def stop(self): self._selector.unregister(self) if self._transport: self.st_closed() def fileno(self): return self._transport.fileno() @synchronized def reading(self): return self._transport is not None and \ self._transport.reading(True) @synchronized def writing(self): return self._transport is not None and \ self._transport.writing(self.engine.pending()) @synchronized def timing(self): return self._timeout @synchronized def readable(self): try: data = self._transport.recv(64*1024) if data is None: return elif data: rawlog.debug("READ[%s]: %r", self.log_id, data) self.engine.write(data) else: self.close_engine() except socket.error, e: self.close_engine(ConnectionError(text=str(e))) self.update_status() self._notify() def _notify(self): if self.connection.error: self.connection._condition.gc() self.connection._waiter.notifyAll() def close_engine(self, e=None): if e is None: e = ConnectionError(text="connection aborted") if (self.connection.reconnect and (self.connection.reconnect_limit is None or self.connection.reconnect_limit <= 0 or self._attempts <= self.connection.reconnect_limit)): if self._host < self._num_hosts(): delay = 0 else: delay = self._delay self._delay = min(2*self._delay, self.connection.reconnect_interval_max) self._next_retry = time.time() + delay if self._reconnect_log: log.warn("recoverable error[attempt %s]: %s" % (self._attempts, e)) if delay > 0: log.warn("sleeping %s seconds" % delay) self._retrying = True self.engine.close() else: self.engine.close(e) self.schedule() def update_status(self): status = self.engine.status() return getattr(self, "st_%s" % status.lower())() def st_closed(self): # XXX: this log statement seems to sometimes hit when the socket is not connected # XXX: rawlog.debug("CLOSE[%s]: %s", self.log_id, self._socket.getpeername()) self._transport.close() self._transport = None self.engine = None return True def st_open(self): return False @synchronized def writeable(self): notify = False try: n = self._transport.send(self.engine.peek()) if n == 0: return sent = self.engine.read(n) rawlog.debug("SENT[%s]: %r", self.log_id, 
sent) except socket.error, e: self.close_engine(e) notify = True if self.update_status() or notify: self._notify() @synchronized def timeout(self): self.dispatch() self._notify() self.schedule() def schedule(self): times = [] if self.connection.heartbeat: times.append(time.time() + self.connection.heartbeat) if self._next_retry: times.append(self._next_retry) if times: self._timeout = min(times) else: self._timeout = None def dispatch(self): try: if self._transport is None: if self.connection._connected and not self.connection.error: self.connect() else: self.engine.dispatch() except HeartbeatTimeout, e: self.close_engine(e) except: # XXX: Does socket get leaked if this occurs? msg = compat.format_exc() self.connection.error = InternalError(text=msg) def connect(self): if self._retrying and time.time() < self._next_retry: return try: # XXX: should make this non blocking host, port = self._next_host() if self._retrying and self._reconnect_log: log.warn("trying: %s:%s", host, port) self.engine = Engine(self.connection) self.engine.open() rawlog.debug("OPEN[%s]: %s:%s", self.log_id, host, port) trans = transports.TRANSPORTS.get(self.connection.transport) if trans: self._transport = trans(self.connection, host, port) else: raise ConnectError("no such transport: %s" % self.connection.transport) if self._retrying and self._reconnect_log: log.warn("reconnect succeeded: %s:%s", host, port) self._next_retry = None self._attempts = 0 self._delay = self.connection.reconnect_interval_min self._retrying = False self.schedule() except socket.error, e: self.close_engine(ConnectError(text=str(e))) DEFAULT_DISPOSITION = Disposition(None) def get_bindings(opts, queue=None, exchange=None, key=None): bindings = opts.get("x-bindings", []) cmds = [] for b in bindings: exchange = b.get("exchange", exchange) queue = b.get("queue", queue) key = b.get("key", key) args = b.get("arguments", {}) cmds.append(ExchangeBind(queue, exchange, key, args)) return cmds CONNECTION_ERRS = { # anythong not here (i.e. 
everything right now) will default to # connection error } SESSION_ERRS = { # anything not here will default to session error error_code.unauthorized_access: UnauthorizedAccess, error_code.not_found: NotFound, error_code.resource_locked: ReceiverError, error_code.resource_limit_exceeded: TargetCapacityExceeded, error_code.internal_error: ServerError } class Engine: def __init__(self, connection): self.connection = connection self.log_id = "%x" % id(self.connection) self._closing = False self._connected = False self._attachments = {} self._in = LinkIn() self._out = LinkOut() self._channel_max = 65536 self._channels = 0 self._sessions = {} self.address_cache = Cache(self.connection.address_ttl) self._status = CLOSED self._buf = "" self._hdr = "" self._last_in = None self._last_out = None self._op_enc = OpEncoder() self._seg_enc = SegmentEncoder() self._frame_enc = FrameEncoder() self._frame_dec = FrameDecoder() self._seg_dec = SegmentDecoder() self._op_dec = OpDecoder() self._sasl = sasl.Client() if self.connection.username: self._sasl.setAttr("username", self.connection.username) if self.connection.password: self._sasl.setAttr("password", self.connection.password) if self.connection.host: self._sasl.setAttr("host", self.connection.host) self._sasl.setAttr("service", self.connection.sasl_service) if self.connection.sasl_min_ssf is not None: self._sasl.setAttr("minssf", self.connection.sasl_min_ssf) if self.connection.sasl_max_ssf is not None: self._sasl.setAttr("maxssf", self.connection.sasl_max_ssf) self._sasl.init() self._sasl_encode = False self._sasl_decode = False def _reset(self): self.connection._transport_connected = False for ssn in self.connection.sessions.values(): for m in ssn.acked + ssn.unacked + ssn.incoming: m._transfer_id = None for snd in ssn.senders: snd.linked = False for rcv in ssn.receivers: rcv.impending = rcv.received rcv.linked = False def status(self): return self._status def write(self, data): self._last_in = time.time() try: if self._sasl_decode: data = self._sasl.decode(data) if len(self._hdr) < 8: r = 8 - len(self._hdr) self._hdr += data[:r] data = data[r:] if len(self._hdr) == 8: self.do_header(self._hdr) self._frame_dec.write(data) self._seg_dec.write(*self._frame_dec.read()) self._op_dec.write(*self._seg_dec.read()) for op in self._op_dec.read(): self.assign_id(op) opslog.debug("RCVD[%s]: %r", self.log_id, op) op.dispatch(self) self.dispatch() except MessagingError, e: self.close(e) except: self.close(InternalError(text=compat.format_exc())) def close(self, e=None): self._reset() if e: self.connection.error = e self._status = CLOSED def assign_id(self, op): if isinstance(op, Command): sst = self.get_sst(op) op.id = sst.received sst.received += 1 def pending(self): return len(self._buf) def read(self, n): result = self._buf[:n] self._buf = self._buf[n:] return result def peek(self): return self._buf def write_op(self, op): opslog.debug("SENT[%s]: %r", self.log_id, op) self._op_enc.write(op) self._seg_enc.write(*self._op_enc.read()) self._frame_enc.write(*self._seg_enc.read()) bytes = self._frame_enc.read() if self._sasl_encode: bytes = self._sasl.encode(bytes) self._buf += bytes self._last_out = time.time() def do_header(self, hdr): cli_major = 0; cli_minor = 10 magic, _, _, major, minor = struct.unpack(HEADER, hdr) if major != cli_major or minor != cli_minor: raise VersionError(text="client: %s-%s, server: %s-%s" % (cli_major, cli_minor, major, minor)) def do_connection_start(self, start): if self.connection.sasl_mechanisms: permitted = 
self.connection.sasl_mechanisms.split() mechs = [m for m in start.mechanisms if m in permitted] else: mechs = start.mechanisms try: mech, initial = self._sasl.start(" ".join(mechs)) except sasl.SASLError, e: raise AuthenticationFailure(text=str(e)) client_properties = get_client_properties_with_defaults(provided_client_properties=self.connection.client_properties); self.write_op(ConnectionStartOk(client_properties=client_properties, mechanism=mech, response=initial)) def do_connection_secure(self, secure): resp = self._sasl.step(secure.challenge) self.write_op(ConnectionSecureOk(response=resp)) def do_connection_tune(self, tune): # XXX: is heartbeat protocol specific? if tune.channel_max is not None: self.channel_max = tune.channel_max self.write_op(ConnectionTuneOk(heartbeat=self.connection.heartbeat, channel_max=self.channel_max)) self.write_op(ConnectionOpen()) self._sasl_encode = True def do_connection_open_ok(self, open_ok): self.connection.auth_username = self._sasl.auth_username() self._connected = True self._sasl_decode = True self.connection._transport_connected = True def do_connection_heartbeat(self, hrt): pass def do_connection_close(self, close): self.write_op(ConnectionCloseOk()) if close.reply_code != close_code.normal: exc = CONNECTION_ERRS.get(close.reply_code, ConnectionError) self.connection.error = exc(close.reply_code, close.reply_text) # XXX: should we do a half shutdown on the socket here? # XXX: we really need to test this, we may end up reporting a # connection abort after this, if we were to do a shutdown on read # and stop reading, then we wouldn't report the abort, that's # probably the right thing to do def do_connection_close_ok(self, close_ok): self.close() def do_session_attached(self, atc): pass def do_session_command_point(self, cp): sst = self.get_sst(cp) sst.received = cp.command_id def do_session_completed(self, sc): sst = self.get_sst(sc) for r in sc.commands: sst.acknowledged.add(r.lower, r.upper) if not sc.commands.empty(): while sst.min_completion in sc.commands: if sst.actions.has_key(sst.min_completion): sst.actions.pop(sst.min_completion)() sst.min_completion += 1 def session_known_completed(self, kcmp): sst = self.get_sst(kcmp) executed = RangedSet() for e in sst.executed.ranges: for ke in kcmp.ranges: if e.lower in ke and e.upper in ke: break else: executed.add_range(e) sst.executed = completed def do_session_flush(self, sf): sst = self.get_sst(sf) if sf.expected: if sst.received is None: exp = None else: exp = RangedSet(sst.received) sst.write_op(SessionExpected(exp)) if sf.confirmed: sst.write_op(SessionConfirmed(sst.executed)) if sf.completed: sst.write_op(SessionCompleted(sst.executed)) def do_session_request_timeout(self, rt): sst = self.get_sst(rt) sst.write_op(SessionTimeout(timeout=0)) def do_execution_result(self, er): sst = self.get_sst(er) sst.results[er.command_id] = er.value sst.executed.add(er.id) def do_execution_exception(self, ex): sst = self.get_sst(ex) exc = SESSION_ERRS.get(ex.error_code, SessionError) sst.session.error = exc(ex.error_code, ex.description) def dispatch(self): if not self.connection._connected and not self._closing and self._status != CLOSED: self.disconnect() if self._connected and not self._closing: for ssn in self.connection.sessions.values(): self.attach(ssn) self.process(ssn) if self.connection.heartbeat and self._status != CLOSED: now = time.time() if self._last_in is not None and \ now - self._last_in > 2*self.connection.heartbeat: raise HeartbeatTimeout(text="heartbeat timeout") if self._last_out is 
None or now - self._last_out >= self.connection.heartbeat/2.0: self.write_op(ConnectionHeartbeat()) def open(self): self._reset() self._status = OPEN self._buf += struct.pack(HEADER, "AMQP", 1, 1, 0, 10) def disconnect(self): self.write_op(ConnectionClose(close_code.normal)) self._closing = True def attach(self, ssn): if ssn.closed: return sst = self._attachments.get(ssn) if sst is None: for i in xrange(0, self.channel_max): if not self._sessions.has_key(i): ch = i break else: raise RuntimeError("all channels used") sst = SessionState(self, ssn, ssn.name, ch) sst.write_op(SessionAttach(name=ssn.name)) sst.write_op(SessionCommandPoint(sst.sent, 0)) sst.outgoing_idx = 0 sst.acked = [] sst.acked_idx = 0 if ssn.transactional: sst.write_cmd(TxSelect()) self._attachments[ssn] = sst self._sessions[sst.channel] = sst for snd in ssn.senders: self.link(snd, self._out, snd.target) for rcv in ssn.receivers: self.link(rcv, self._in, rcv.source) if sst is not None and ssn.closing and not sst.detached: sst.detached = True sst.write_op(SessionDetach(name=ssn.name)) def get_sst(self, op): return self._sessions[op.channel] def do_session_detached(self, dtc): sst = self._sessions.pop(dtc.channel) ssn = sst.session del self._attachments[ssn] ssn.closed = True def do_session_detach(self, dtc): sst = self.get_sst(dtc) sst.write_op(SessionDetached(name=dtc.name)) self.do_session_detached(dtc) def link(self, lnk, dir, addr): sst = self._attachments.get(lnk.session) _lnk = self._attachments.get(lnk) if _lnk is None and not lnk.closed: _lnk = Attachment(lnk) _lnk.closing = False dir.init_link(sst, lnk, _lnk) err = self.parse_address(_lnk, dir, addr) or self.validate_options(_lnk, dir) if err: lnk.error = err lnk.closed = True return def linked(): lnk.linked = True def resolved(type, subtype): dir.do_link(sst, lnk, _lnk, type, subtype, linked) self.resolve_declare(sst, _lnk, dir.DIR_NAME, resolved) self._attachments[lnk] = _lnk if lnk.linked and lnk.closing and not lnk.closed: if not _lnk.closing: def unlinked(): dir.del_link(sst, lnk, _lnk) del self._attachments[lnk] lnk.closed = True if _lnk.options.get("delete") in ("always", dir.DIR_NAME): dir.do_unlink(sst, lnk, _lnk) self.delete(sst, _lnk.name, unlinked) else: dir.do_unlink(sst, lnk, _lnk, unlinked) _lnk.closing = True elif not lnk.linked and lnk.closing and not lnk.closed: if lnk.error: lnk.closed = True def parse_address(self, lnk, dir, addr): if addr is None: return MalformedAddress(text="%s is None" % dir.ADDR_NAME) else: try: lnk.name, lnk.subject, lnk.options = address.parse(addr) # XXX: subject if lnk.options is None: lnk.options = {} except address.LexError, e: return MalformedAddress(text=str(e)) except address.ParseError, e: return MalformedAddress(text=str(e)) def validate_options(self, lnk, dir): ctx = Context() err = dir.VALIDATOR.validate(lnk.options, ctx) if err: return InvalidOption(text="error in options: %s" % err) def resolve_declare(self, sst, lnk, dir, action): declare = lnk.options.get("create") in ("always", dir) assrt = lnk.options.get("assert") in ("always", dir) def do_resolved(type, subtype): err = None if type is None: if declare: err = self.declare(sst, lnk, action) else: err = NotFound(text="no such queue: %s" % lnk.name) else: if assrt: expected = lnk.options.get("node", {}).get("type") if expected and type != expected: err = AssertionFailed(text="expected %s, got %s" % (expected, type)) if err is None: action(type, subtype) if err: tgt = lnk.target tgt.error = err del self._attachments[tgt] tgt.closed = True return 
self.resolve(sst, lnk.name, do_resolved, force=declare) def resolve(self, sst, name, action, force=False): if not force: try: type, subtype = self.address_cache[name] action(type, subtype) return except KeyError: pass args = [] def do_result(r): args.append(r) def do_action(r): do_result(r) er, qr = args if er.not_found and not qr.queue: type, subtype = None, None elif qr.queue: type, subtype = "queue", None else: type, subtype = "topic", er.type if type is not None: self.address_cache[name] = (type, subtype) action(type, subtype) sst.write_query(ExchangeQuery(name), do_result) sst.write_query(QueueQuery(name), do_action) def declare(self, sst, lnk, action): name = lnk.name props = lnk.options.get("node", {}) durable = props.get("durable", DURABLE_DEFAULT) type = props.get("type", "queue") declare = props.get("x-declare", {}) if type == "topic": cmd = ExchangeDeclare(exchange=name, durable=durable) bindings = get_bindings(props, exchange=name) elif type == "queue": cmd = QueueDeclare(queue=name, durable=durable) bindings = get_bindings(props, queue=name) else: raise ValueError(type) sst.apply_overrides(cmd, declare) if type == "topic": if cmd.type is None: cmd.type = "topic" subtype = cmd.type else: subtype = None cmds = [cmd] cmds.extend(bindings) def declared(): self.address_cache[name] = (type, subtype) action(type, subtype) sst.write_cmds(cmds, declared) def delete(self, sst, name, action): def deleted(): del self.address_cache[name] action() def do_delete(type, subtype): if type == "topic": sst.write_cmd(ExchangeDelete(name), deleted) elif type == "queue": sst.write_cmd(QueueDelete(name), deleted) elif type is None: action() else: raise ValueError(type) self.resolve(sst, name, do_delete, force=True) def process(self, ssn): if ssn.closed or ssn.closing: return sst = self._attachments[ssn] while sst.outgoing_idx < len(ssn.outgoing): msg = ssn.outgoing[sst.outgoing_idx] snd = msg._sender # XXX: should check for sender error here _snd = self._attachments.get(snd) if _snd and snd.linked: self.send(snd, msg) sst.outgoing_idx += 1 else: break for snd in ssn.senders: # XXX: should included snd.acked in this if snd.synced >= snd.queued and sst.need_sync: sst.write_cmd(ExecutionSync(), sync_noop) for rcv in ssn.receivers: self.process_receiver(rcv) if ssn.acked: messages = ssn.acked[sst.acked_idx:] if messages: ids = RangedSet() disposed = [(DEFAULT_DISPOSITION, [])] acked = [] for m in messages: # XXX: we're ignoring acks that get lost when disconnected, # could we deal this via some message-id based purge? if m._transfer_id is None: acked.append(m) continue ids.add(m._transfer_id) if m._receiver._accept_mode is accept_mode.explicit: disp = m._disposition or DEFAULT_DISPOSITION last, msgs = disposed[-1] if disp.type is last.type and disp.options == last.options: msgs.append(m) else: disposed.append((disp, [m])) else: acked.append(m) for range in ids: sst.executed.add_range(range) sst.write_op(SessionCompleted(sst.executed)) def ack_acker(msgs): def ack_ack(): for m in msgs: ssn.acked.remove(m) sst.acked_idx -= 1 # XXX: should this check accept_mode too? 
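        # Non-transactional messages can be forgotten as soon as their accept
        # completes; transactional sessions keep them in sst.acked until the
        # TxCommit/TxRollback handling below.  The loop that follows maps each
        # disposition batch onto the corresponding 0-10 command: no
        # disposition -> MessageAccept, RELEASED -> MessageRelease,
        # REJECTED -> MessageReject.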
if not ssn.transactional: sst.acked.remove(m) return ack_ack for disp, msgs in disposed: if not msgs: continue if disp.type is None: op = MessageAccept elif disp.type is RELEASED: op = MessageRelease elif disp.type is REJECTED: op = MessageReject sst.write_cmd(op(RangedSet(*[m._transfer_id for m in msgs]), **disp.options), ack_acker(msgs)) if log.isEnabledFor(DEBUG): for m in msgs: log.debug("SACK[%s]: %s, %s", ssn.log_id, m, m._disposition) sst.acked.extend(messages) sst.acked_idx += len(messages) ack_acker(acked)() if ssn.committing and not sst.committing: def commit_ok(): del sst.acked[:] ssn.committing = False ssn.committed = True ssn.aborting = False ssn.aborted = False sst.committing = False sst.write_cmd(TxCommit(), commit_ok) sst.committing = True if ssn.aborting and not sst.aborting: sst.aborting = True def do_rb(): messages = sst.acked + ssn.unacked + ssn.incoming ids = RangedSet(*[m._transfer_id for m in messages]) for range in ids: sst.executed.add_range(range) sst.write_op(SessionCompleted(sst.executed)) sst.write_cmd(MessageRelease(ids, True)) sst.write_cmd(TxRollback(), do_rb_ok) def do_rb_ok(): del ssn.incoming[:] del ssn.unacked[:] del sst.acked[:] for rcv in ssn.receivers: rcv.impending = rcv.received rcv.returned = rcv.received # XXX: do we need to update granted here as well? for rcv in ssn.receivers: self.process_receiver(rcv) ssn.aborting = False ssn.aborted = True ssn.committing = False ssn.committed = False sst.aborting = False for rcv in ssn.receivers: _rcv = self._attachments[rcv] sst.write_cmd(MessageStop(_rcv.destination)) sst.write_cmd(ExecutionSync(), do_rb) def grant(self, rcv): sst = self._attachments[rcv.session] _rcv = self._attachments.get(rcv) if _rcv is None or not rcv.linked or _rcv.closing or _rcv.draining: return if rcv.granted is UNLIMITED: if rcv.impending is UNLIMITED: delta = 0 else: delta = UNLIMITED elif rcv.impending is UNLIMITED: delta = -1 else: delta = max(rcv.granted, rcv.received) - rcv.impending if delta is UNLIMITED: if not _rcv.bytes_open: sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.byte, UNLIMITED.value)) _rcv.bytes_open = True sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.message, UNLIMITED.value)) rcv.impending = UNLIMITED elif delta > 0: if not _rcv.bytes_open: sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.byte, UNLIMITED.value)) _rcv.bytes_open = True sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.message, delta)) rcv.impending += delta elif delta < 0 and not rcv.draining: _rcv.draining = True def do_stop(): rcv.impending = rcv.received _rcv.draining = False _rcv.bytes_open = False self.grant(rcv) sst.write_cmd(MessageStop(_rcv.destination), do_stop) if rcv.draining: _rcv.draining = True def do_flush(): rcv.impending = rcv.received rcv.granted = rcv.impending _rcv.draining = False _rcv.bytes_open = False rcv.draining = False sst.write_cmd(MessageFlush(_rcv.destination), do_flush) def process_receiver(self, rcv): if rcv.closed: return self.grant(rcv) def send(self, snd, msg): sst = self._attachments[snd.session] _snd = self._attachments[snd] if msg.subject is None or _snd._exchange == "": rk = _snd._routing_key else: rk = msg.subject if msg.subject is None: subject = _snd.subject else: subject = msg.subject # XXX: do we need to query to figure out how to create the reply-to interoperably? 
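    # What follows maps the API-level Message onto its 0-10 wire form: the
    # reply-to address (if any) becomes a ReplyTo struct, the standard fields
    # become DeliveryProperties/MessageProperties, the content is encoded
    # with the codec chosen by content_type, and the MessageTransfer is
    # written to the session state (acknowledged immediately when the link
    # is pre-acked, i.e. unreliable).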
if msg.reply_to: rt = addr2reply_to(msg.reply_to) else: rt = None content_encoding = msg.properties.get("x-amqp-0-10.content-encoding") dp = DeliveryProperties(routing_key=rk) mp = MessageProperties(message_id=msg.id, user_id=msg.user_id, reply_to=rt, correlation_id=msg.correlation_id, app_id = msg.properties.get("x-amqp-0-10.app-id"), content_type=msg.content_type, content_encoding=content_encoding, application_headers=msg.properties) if subject is not None: if mp.application_headers is None: mp.application_headers = {} mp.application_headers[SUBJECT] = subject if msg.durable is not None: if msg.durable: dp.delivery_mode = delivery_mode.persistent else: dp.delivery_mode = delivery_mode.non_persistent if msg.priority is not None: dp.priority = msg.priority if msg.ttl is not None: dp.ttl = long(msg.ttl*1000) enc, dec = get_codec(msg.content_type) body = enc(msg.content) # XXX: this is not safe for out of order, can this be triggered by pre_ack? def msg_acked(): # XXX: should we log the ack somehow too? snd.acked += 1 m = snd.session.outgoing.pop(0) sst.outgoing_idx -= 1 log.debug("RACK[%s]: %s", sst.session.log_id, msg) assert msg == m xfr = MessageTransfer(destination=_snd._exchange, headers=(dp, mp), payload=body) if _snd.pre_ack: sst.write_cmd(xfr) else: sst.write_cmd(xfr, msg_acked, sync=msg._sync) log.debug("SENT[%s]: %s", sst.session.log_id, msg) if _snd.pre_ack: msg_acked() def do_message_transfer(self, xfr): sst = self.get_sst(xfr) ssn = sst.session msg = self._decode(xfr) rcv = sst.destinations[xfr.destination].target msg._receiver = rcv if rcv.impending is not UNLIMITED: assert rcv.received < rcv.impending, "%s, %s" % (rcv.received, rcv.impending) rcv.received += 1 log.debug("RCVD[%s]: %s", ssn.log_id, msg) ssn.incoming.append(msg) def _decode(self, xfr): dp = EMPTY_DP mp = EMPTY_MP for h in xfr.headers: if isinstance(h, DeliveryProperties): dp = h elif isinstance(h, MessageProperties): mp = h ap = mp.application_headers enc, dec = get_codec(mp.content_type) content = dec(xfr.payload) msg = Message(content) msg.id = mp.message_id if ap is not None: msg.subject = ap.get(SUBJECT) msg.user_id = mp.user_id if mp.reply_to is not None: msg.reply_to = reply_to2addr(mp.reply_to) msg.correlation_id = mp.correlation_id if dp.delivery_mode is not None: msg.durable = dp.delivery_mode == delivery_mode.persistent msg.priority = dp.priority if dp.ttl is not None: msg.ttl = dp.ttl/1000.0 msg.redelivered = dp.redelivered msg.properties = mp.application_headers or {} if mp.app_id is not None: msg.properties["x-amqp-0-10.app-id"] = mp.app_id if mp.content_encoding is not None: msg.properties["x-amqp-0-10.content-encoding"] = mp.content_encoding if dp.routing_key is not None: msg.properties["x-amqp-0-10.routing-key"] = dp.routing_key if dp.timestamp is not None: msg.properties["x-amqp-0-10.timestamp"] = dp.timestamp msg.content_type = mp.content_type msg._transfer_id = xfr.id return msg qpid-python-0.22/python/qpid/messaging/address.py0000644000175000017500000001041111513625147020225 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import re from qpid.lexer import Lexicon, LexError from qpid.parser import Parser, ParseError l = Lexicon() LBRACE = l.define("LBRACE", r"\{") RBRACE = l.define("RBRACE", r"\}") LBRACK = l.define("LBRACK", r"\[") RBRACK = l.define("RBRACK", r"\]") COLON = l.define("COLON", r":") SEMI = l.define("SEMI", r";") SLASH = l.define("SLASH", r"/") COMMA = l.define("COMMA", r",") NUMBER = l.define("NUMBER", r'[+-]?[0-9]*\.?[0-9]+') ID = l.define("ID", r'[a-zA-Z_](?:[a-zA-Z0-9_-]*[a-zA-Z0-9_])?') STRING = l.define("STRING", r""""(?:[^\\"]|\\.)*"|'(?:[^\\']|\\.)*'""") ESC = l.define("ESC", r"\\[^ux]|\\x[0-9a-fA-F][0-9a-fA-F]|\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]") SYM = l.define("SYM", r"[.#*%@$^!+-]") WSPACE = l.define("WSPACE", r"[ \n\r\t]+") EOF = l.eof("EOF") LEXER = l.compile() def lex(st): return LEXER.lex(st) def tok2str(tok): if tok.type is STRING: return eval(tok.value) elif tok.type is ESC: if tok.value[1] == "x": return eval('"%s"' % tok.value) elif tok.value[1] == "u": return eval('u"%s"' % tok.value) else: return tok.value[1] else: return tok.value CONSTANTS = { "True": True, "true": True, "False": False, "false": False, "None": None } def tok2obj(tok): if tok.type == ID: return CONSTANTS.get(tok.value, tok.value) elif tok.type in (STRING, NUMBER): return eval(tok.value) else: return tok.value def toks2str(toks): if toks: return "".join(map(tok2str, toks)) else: return None class AddressParser(Parser): def __init__(self, tokens): Parser.__init__(self, [t for t in tokens if t.type is not WSPACE]) def parse(self): result = self.address() self.eat(EOF) return result def address(self): name = toks2str(self.eat_until(SLASH, SEMI, EOF)) if name is None: raise ParseError(self.next()) if self.matches(SLASH): self.eat(SLASH) subject = toks2str(self.eat_until(SEMI, EOF)) else: subject = None if self.matches(SEMI): self.eat(SEMI) options = self.map() else: options = None return name, subject, options def map(self): self.eat(LBRACE) result = {} while True: if self.matches(NUMBER, STRING, ID, LBRACE, LBRACK): n, v = self.keyval() result[n] = v if self.matches(COMMA): self.eat(COMMA) elif self.matches(RBRACE): break else: raise ParseError(self.next(), COMMA, RBRACE) elif self.matches(RBRACE): break else: raise ParseError(self.next(), NUMBER, STRING, ID, LBRACE, LBRACK, RBRACE) self.eat(RBRACE) return result def keyval(self): key = self.value() self.eat(COLON) val = self.value() return (key, val) def value(self): if self.matches(NUMBER, STRING, ID): return tok2obj(self.eat()) elif self.matches(LBRACE): return self.map() elif self.matches(LBRACK): return self.list() else: raise ParseError(self.next(), NUMBER, STRING, ID, LBRACE, LBRACK) def list(self): self.eat(LBRACK) result = [] while True: if self.matches(RBRACK): break else: result.append(self.value()) if self.matches(COMMA): self.eat(COMMA) elif self.matches(RBRACK): break else: raise ParseError(self.next(), COMMA, RBRACK) self.eat(RBRACK) return result def parse(addr): return AddressParser(lex(addr)).parse() __all__ = ["parse", "ParseError"] qpid-python-0.22/python/qpid/specs/0000755000175000017500000000000012151237730015365 5ustar 
mbambaqpid-python-0.22/python/qpid/specs/amqp-0-9-1-stripped.xml0000644000175000017500000005044112142746464021353 0ustar mbamba qpid-python-0.22/python/qpid/specs/amqp-0-10.dtd0000644000175000017500000002224711337330527017405 0ustar mbamba qpid-python-0.22/python/qpid/specs/amqp-0-8-qpid-stripped.xml0000644000175000017500000007767512142746464022171 0ustar mbamba qpid-python-0.22/python/qpid/specs/amqp-0-9-qpid-stripped.xml0000644000175000017500000011030712142746464022146 0ustar mbamba qpid-python-0.22/python/qpid/specs/amqp-0-10-qpid-errata-stripped.xml0000644000175000017500000013623412142746464023501 0ustar mbamba
qpid-python-0.22/python/qpid/specs/amqp-0-10-stripped.xml0000644000175000017500000013604712142746464021274 0ustar mbamba
qpid-python-0.22/python/qpid/util.py0000644000175000017500000001314312046451714015604 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import os, socket, time, textwrap, re, sys try: from ssl import wrap_socket as ssl except ImportError: from socket import ssl as wrap_socket class ssl: def __init__(self, sock, keyfile=None, certfile=None, trustfile=None): # Bug (QPID-4337): this is the "old" version of python SSL. # The private key is required. If a certificate is given, but no # keyfile, assume the key is contained in the certificate if certfile and not keyfile: keyfile = certfile self.sock = sock self.ssl = wrap_socket(sock, keyfile=keyfile, certfile=certfile) def recv(self, n): return self.ssl.read(n) def send(self, s): return self.ssl.write(s) def close(self): self.sock.close() def get_client_properties_with_defaults(provided_client_properties={}): ppid = 0 try: ppid = os.getppid() except: pass client_properties = {"product": "qpid python client", "version": "development", "platform": os.name, "qpid.client_process": os.path.basename(sys.argv[0]), "qpid.client_pid": os.getpid(), "qpid.client_ppid": ppid} if provided_client_properties: client_properties.update(provided_client_properties) return client_properties def connect(host, port): for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = socket.socket(af, socktype, proto) try: sock.connect(sa) break except socket.error, msg: sock.close else: # If we got here then we couldn't connect (yet) raise return sock def listen(host, port, predicate = lambda: True, bound = lambda: None): sock = socket.socket() sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((host, port)) sock.listen(5) bound() while predicate(): s, a = sock.accept() yield s def mtime(filename): return os.stat(filename).st_mtime def wait(condition, predicate, timeout=None): condition.acquire() try: passed = 0 start = time.time() while not predicate(): if timeout is None: # using the timed wait prevents keyboard interrupts from being # blocked while waiting condition.wait(3) elif passed < timeout: condition.wait(timeout - passed) else: return False passed = time.time() - start return True finally: condition.release() def notify(condition, action=lambda: None): condition.acquire() try: action() condition.notifyAll() finally: condition.release() def fill(text, indent, heading = None): sub = indent * " " if heading: if not text: return (indent - 2) * " " + heading init = (indent - 2) * " " + heading + " -- " else: init = sub w = textwrap.TextWrapper(initial_indent = init, subsequent_indent = sub) return w.fill(" ".join(text.split())) class URL: RE = re.compile(r""" # [ :// ] [ [ / ] @] ( | \[ \] ) [ : ] ^ (?: ([^:/@]+)://)? (?: ([^:/@]+) (?: / ([^:/@]+) )? @)? 
(?: ([^@:/\[]+) | \[ ([a-f0-9:.]+) \] ) (?: :([0-9]+))?$ """, re.X | re.I) AMQPS = "amqps" AMQP = "amqp" def __init__(self, s=None, **kwargs): if s is None: self.scheme = kwargs.get('scheme', None) self.user = kwargs.get('user', None) self.password = kwargs.get('password', None) self.host = kwargs.get('host', None) self.port = kwargs.get('port', None) if self.host is None: raise ValueError('Host required for url') elif isinstance(s, URL): self.scheme = s.scheme self.user = s.user self.password = s.password self.host = s.host self.port = s.port else: match = URL.RE.match(s) if match is None: raise ValueError(s) self.scheme, self.user, self.password, host4, host6, port = match.groups() self.host = host4 or host6 if port is None: self.port = None else: self.port = int(port) def __repr__(self): return "URL(%r)" % str(self) def __str__(self): s = "" if self.scheme: s += "%s://" % self.scheme if self.user: s += self.user if self.password: s += "/%s" % self.password s += "@" if ':' not in self.host: s += self.host else: s += "[%s]" % self.host if self.port: s += ":%s" % self.port return s def __eq__(self, url): if isinstance(url, basestring): url = URL(url) return \ self.scheme==url.scheme and \ self.user==url.user and self.password==url.password and \ self.host==url.host and self.port==url.port def __ne__(self, url): return not self.__eq__(url) def default(value, default): if value is None: return default else: return value qpid-python-0.22/python/qpid/content.py0000644000175000017500000000322310657105144016276 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ A simple python representation for AMQP content. """ def default(val, defval): if val == None: return defval else: return val class Content: def __init__(self, body = "", children = None, properties = None): self.body = body self.children = default(children, []) self.properties = default(properties, {}) def size(self): return len(self.body) def weight(self): return len(self.children) def __getitem__(self, name): return self.properties[name] def __setitem__(self, name, value): self.properties[name] = value def __delitem__(self, name): del self.properties[name] def __str__(self): if self.children: return "%s [%s] %s" % (self.properties, ", ".join(map(str, self.children)), self.body) else: return "%s %s" % (self.properties, self.body) qpid-python-0.22/python/qpid/__init__.py0000644000175000017500000000420411326041427016360 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import connection class Struct: def __init__(self, type, *args, **kwargs): self.__dict__["type"] = type self.__dict__["_values"] = {} if len(args) > len(self.type.fields): raise TypeError("too many args") for a, f in zip(args, self.type.fields): self.set(f.name, a) for k, a in kwargs.items(): self.set(k, a) def _check(self, attr): field = self.type.fields.byname.get(attr) if field == None: raise AttributeError(attr) return field def exists(self, attr): return self.type.fields.byname.has_key(attr) def has(self, attr): self._check(attr) return self._values.has_key(attr) def set(self, attr, value): self._check(attr) self._values[attr] = value def get(self, attr): field = self._check(attr) return self._values.get(attr, field.default()) def clear(self, attr): self._check(attr) del self._values[attr] def __setattr__(self, attr, value): self.set(attr, value) def __getattr__(self, attr): return self.get(attr) def __delattr__(self, attr): self.clear(attr) def __setitem__(self, attr, value): self.set(attr, value) def __getitem__(self, attr): return self.get(attr) def __delitem__(self, attr): self.clear(attr) def __str__(self): return "%s %s" % (self.type, self._values) def __repr__(self): return str(self) qpid-python-0.22/python/qpid/concurrency.py0000644000175000017500000000560512142746464017173 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
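# Illustrative usage sketch (assumed, not part of the qpid sources): Struct
# above wraps a spec-defined type.  The stand-in classes below mimic just
# enough of the expected `type.fields` interface (an iterable of fields with
# a `byname` index, each field exposing `name` and `default()`) to show how
# positional arguments and attribute access map onto field values.
from qpid import Struct

class _Field:
  def __init__(self, name):
    self.name = name
  def default(self):
    return None

class _Fields(list):
  def __init__(self, *fields):
    list.__init__(self, fields)
    self.byname = dict((f.name, f) for f in fields)

class _Type:
  fields = _Fields(_Field("destination"), _Field("body"))

s = Struct(_Type(), "amq.topic")  # positional arg fills the first field
s.body = "hello"                  # attribute access goes through set()/get()
print s.destination, s.body       # -> amq.topic hello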
# import compat, inspect, time def synchronized(meth): args, vargs, kwargs, defs = inspect.getargspec(meth) scope = {} scope["meth"] = meth exec """ def %s%s: %s %s._lock.acquire() try: return meth%s finally: %s._lock.release() """ % (meth.__name__, inspect.formatargspec(args, vargs, kwargs, defs), repr(inspect.getdoc(meth)), args[0], inspect.formatargspec(args, vargs, kwargs, defs, formatvalue=lambda x: ""), args[0]) in scope return scope[meth.__name__] class Waiter(object): def __init__(self, condition): self.condition = condition def wait(self, predicate, timeout=None): passed = 0 start = time.time() while not predicate(): if timeout is None: # XXX: this timed wait thing is not necessary for the fast # condition from this module, only for the condition impl from # the threading module # using the timed wait prevents keyboard interrupts from being # blocked while waiting self.condition.wait(3) elif passed < timeout: self.condition.wait(timeout - passed) else: return bool(predicate()) passed = time.time() - start return True def notify(self): self.condition.notify() def notifyAll(self): self.condition.notifyAll() class Condition: def __init__(self, lock): self.lock = lock self.waiters = [] self.waiting = [] def notify(self): assert self.lock._is_owned() if self.waiting: self.waiting[0].wakeup() def notifyAll(self): assert self.lock._is_owned() for w in self.waiting: w.wakeup() def wait(self, timeout=None): assert self.lock._is_owned() if not self.waiters: self.waiters.append(compat.selectable_waiter()) sw = self.waiters.pop(0) self.waiting.append(sw) try: st = self.lock._release_save() sw.wait(timeout) finally: self.lock._acquire_restore(st) self.waiting.remove(sw) self.waiters.append(sw) def gc(self): assert self.lock._is_owned() while self.waiters: sw = self.waiters.pop(0) sw.close() qpid-python-0.22/python/qpid/mimetype.py0000644000175000017500000000521711307763616016471 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
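# Illustrative usage sketch (assumed, not part of the qpid sources): applying
# the synchronized decorator from qpid/concurrency.py above.  The decorated
# method's class only has to provide a `_lock` supporting acquire()/release().
from threading import RLock
from qpid.concurrency import synchronized

class Counter:
  def __init__(self):
    self._lock = RLock()
    self.value = 0

  @synchronized
  def increment(self):
    "Bump the counter while holding self._lock."
    self.value += 1
    return self.value

c = Counter()
print c.increment()   # -> 1, with c._lock held around the method body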
# import re, rfc822 from lexer import Lexicon, LexError from parser import Parser, ParseError l = Lexicon() LPAREN = l.define("LPAREN", r"\(") RPAREN = l.define("LPAREN", r"\)") SLASH = l.define("SLASH", r"/") SEMI = l.define("SEMI", r";") EQUAL = l.define("EQUAL", r"=") TOKEN = l.define("TOKEN", r'[^()<>@,;:\\"/\[\]?= ]+') STRING = l.define("STRING", r'"(?:[^\\"]|\\.)*"') WSPACE = l.define("WSPACE", r"[ \n\r\t]+") EOF = l.eof("EOF") LEXER = l.compile() def lex(st): return LEXER.lex(st) class MimeTypeParser(Parser): def __init__(self, tokens): Parser.__init__(self, [t for t in tokens if t.type is not WSPACE]) def parse(self): result = self.mimetype() self.eat(EOF) return result def mimetype(self): self.remove_comments() self.reset() type = self.eat(TOKEN).value.lower() self.eat(SLASH) subtype = self.eat(TOKEN).value.lower() params = [] while True: if self.matches(SEMI): params.append(self.parameter()) else: break return type, subtype, params def remove_comments(self): while True: self.eat_until(LPAREN, EOF) if self.matches(LPAREN): self.remove(*self.comment()) else: break def comment(self): start = self.eat(LPAREN) while True: self.eat_until(LPAREN, RPAREN) if self.matches(LPAREN): self.comment() else: break end = self.eat(RPAREN) return start, end def parameter(self): self.eat(SEMI) name = self.eat(TOKEN).value self.eat(EQUAL) value = self.value() return name, value def value(self): if self.matches(TOKEN): return self.eat().value elif self.matches(STRING): return rfc822.unquote(self.eat().value) else: raise ParseError(self.next(), TOKEN, STRING) def parse(addr): return MimeTypeParser(lex(addr)).parse() __all__ = ["parse", "ParseError"] qpid-python-0.22/python/qpid/framer.py0000644000175000017500000000706212004253175016101 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
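# Illustrative usage sketch (assumed, not part of the qpid sources): running a
# MIME content-type through parse() from qpid/mimetype.py above; comments in
# parentheses are stripped and quoted parameter values are unquoted.
from qpid.mimetype import parse

mtype, subtype, params = parse('text/plain; charset="utf-8" (a comment)')
print mtype, subtype   # -> text plain
print params           # -> [('charset', 'utf-8')]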
# import struct, socket from exceptions import Closed from packer import Packer from threading import RLock from logging import getLogger raw = getLogger("qpid.io.raw") frm = getLogger("qpid.io.frm") class FramingError(Exception): pass class Framer(Packer): HEADER="!4s4B" def __init__(self, sock): self.sock = sock self.sock_lock = RLock() self.tx_buf = "" self.rx_buf = "" self.security_layer_tx = None self.security_layer_rx = None self.maxbufsize = 65535 def aborted(self): return False def write(self, buf): self.tx_buf += buf def flush(self): self.sock_lock.acquire() try: if self.security_layer_tx: try: cipher_buf = self.security_layer_tx.encode(self.tx_buf) except SASLError, e: raise Closed(str(e)) self._write(cipher_buf) else: self._write(self.tx_buf) self.tx_buf = "" frm.debug("FLUSHED") finally: self.sock_lock.release() def _write(self, buf): while buf: try: n = self.sock.send(buf) except socket.timeout: if self.aborted(): raise Closed() else: continue raw.debug("SENT %r", buf[:n]) buf = buf[n:] ## ## Implementation Note: ## ## This function was modified to use the SASL security layer for content ## decryption. As such, the socket read should read in "self.maxbufsize" ## instead of "n" (the requested number of octets). However, since this ## is one of two places in the code where the socket is read, the read ## size had to be left at "n". This is because this function is ## apparently only used to read the first 8 octets from a TCP socket. If ## we read beyond "n" octets, the remaing octets won't be processed and ## the connection handshake will fail. ## def read(self, n): while len(self.rx_buf) < n: try: s = self.sock.recv(n) # NOTE: instead of "n", arg should be "self.maxbufsize" if self.security_layer_rx: try: s = self.security_layer_rx.decode(s) except SASLError, e: raise Closed(str(e)) except socket.timeout: if self.aborted(): raise Closed() else: continue except socket.error, e: if self.rx_buf != "": raise e else: raise Closed() if len(s) == 0: raise Closed() self.rx_buf += s raw.debug("RECV %r", s) data = self.rx_buf[0:n] self.rx_buf = self.rx_buf[n:] return data def read_header(self): return self.unpack(Framer.HEADER) def write_header(self, major, minor): self.sock_lock.acquire() try: self.pack(Framer.HEADER, "AMQP", 1, 1, major, minor) self.flush() finally: self.sock_lock.release() qpid-python-0.22/python/qpid/specs_config.py0000644000175000017500000000223412142746464017276 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
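# Illustrative usage sketch (assumed, Unix-only socketpair): exchanging the
# 8-octet AMQP protocol header with the Framer from qpid/framer.py above.
import socket
from qpid.framer import Framer

a, b = socket.socketpair()
Framer(a).write_header(0, 10)   # packs "AMQP", 1, 1, major=0, minor=10 and flushes
print Framer(b).read_header()   # expected to unpack as ('AMQP', 1, 1, 0, 10)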
# import os AMQP_SPEC_DIR=os.path.join(os.path.dirname(os.path.abspath(__file__)), "specs") amqp_spec = os.path.join(AMQP_SPEC_DIR, "amqp-0-10-qpid-errata-stripped.xml") amqp_spec_0_8 = os.path.join(AMQP_SPEC_DIR, "amqp-0-8-qpid-stripped.xml") amqp_spec_0_9 = os.path.join(AMQP_SPEC_DIR, "amqp-0-9-qpid-stripped.xml") amqp_spec_0_9_1 = os.path.join(AMQP_SPEC_DIR, "amqp-0-9-1-stripped.xml") qpid-python-0.22/python/qpid/exceptions.py0000644000175000017500000000156711240310343017002 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # class Closed(Exception): pass class Timeout(Exception): pass class VersionError(Exception): pass qpid-python-0.22/python/qpid/session.py0000644000175000017500000001755711326041427016323 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
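# Illustrative sketch (assumed): the constants in qpid/specs_config.py above
# are absolute paths to the stripped AMQP spec XML files shipped with the
# package under qpid/specs/.
import os
from qpid.specs_config import amqp_spec, amqp_spec_0_9_1
print os.path.basename(amqp_spec)       # amqp-0-10-qpid-errata-stripped.xml
print os.path.isfile(amqp_spec_0_9_1)   # True for an installed package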
# from threading import Condition, RLock, Lock, currentThread from generator import command_invoker from datatypes import RangedSet, Struct, Future from codec010 import StringCodec from queue import Queue from datatypes import Message, serial from ops import Command, MessageTransfer from util import wait, notify from exceptions import * from logging import getLogger log = getLogger("qpid.io.cmd") msg = getLogger("qpid.io.msg") class SessionException(Exception): pass class SessionClosed(SessionException): pass class SessionDetached(SessionException): pass def client(*args): return Client(*args) def server(*args): return Server(*args) INCOMPLETE = object() class Session(command_invoker()): def __init__(self, name, auto_sync=True, timeout=10, delegate=client): self.name = name self.auto_sync = auto_sync self.need_sync = True self.timeout = timeout self.channel = None self.invoke_lock = Lock() self._closing = False self._closed = False self.condition = Condition() self.send_id = True self.receiver = Receiver(self) self.sender = Sender(self) self.lock = RLock() self._incoming = {} self.results = {} self.exceptions = [] self.delegate = delegate(self) def incoming(self, destination): self.lock.acquire() try: queue = self._incoming.get(destination) if queue == None: queue = Incoming(self, destination) self._incoming[destination] = queue return queue finally: self.lock.release() def error(self): exc = self.exceptions[:] if len(exc) == 0: return None elif len(exc) == 1: return exc[0] else: return tuple(exc) def sync(self, timeout=None): ch = self.channel if ch is not None and currentThread() == ch.connection.thread: raise SessionException("deadlock detected") if self.need_sync: self.execution_sync(sync=True) last = self.sender.next_id - 1 if not wait(self.condition, lambda: last in self.sender._completed or self.exceptions, timeout): raise Timeout() if self.exceptions: raise SessionException(self.error()) def close(self, timeout=None): self.invoke_lock.acquire() try: self._closing = True self.channel.session_detach(self.name) finally: self.invoke_lock.release() if not wait(self.condition, lambda: self._closed, timeout): raise Timeout() def closed(self): self.lock.acquire() try: if self._closed: return error = self.error() for id in self.results: f = self.results[id] f.error(error) self.results.clear() for q in self._incoming.values(): q.close(error) self._closed = True notify(self.condition) finally: self.lock.release() def invoke(self, op, args, kwargs): if issubclass(op, Command): self.invoke_lock.acquire() try: return self.do_invoke(op, args, kwargs) finally: self.invoke_lock.release() else: return op(*args, **kwargs) def do_invoke(self, op, args, kwargs): if self._closing: raise SessionClosed() ch = self.channel if ch == None: raise SessionDetached() if op == MessageTransfer: if len(args) == len(op.FIELDS) + 1: message = args[-1] args = args[:-1] else: message = kwargs.pop("message", None) if message is not None: kwargs["headers"] = message.headers kwargs["payload"] = message.body cmd = op(*args, **kwargs) cmd.sync = self.auto_sync or cmd.sync self.need_sync = not cmd.sync cmd.channel = ch.id if op.RESULT: result = Future(exception=SessionException) self.results[self.sender.next_id] = result self.send(cmd) log.debug("SENT %s", cmd) if op == MessageTransfer: msg.debug("SENT %s", cmd) if op.RESULT: if self.auto_sync: return result.get(self.timeout) else: return result elif self.auto_sync: self.sync(self.timeout) def received(self, cmd): self.receiver.received(cmd) self.dispatch(cmd) def 
dispatch(self, cmd): log.debug("RECV %s", cmd) result = getattr(self.delegate, cmd.NAME)(cmd) if result is INCOMPLETE: return elif result is not None: self.execution_result(cmd.id, result) self.receiver.completed(cmd) # XXX: don't forget to obey sync for manual completion as well if cmd.sync: self.channel.session_completed(self.receiver._completed) def send(self, cmd): self.sender.send(cmd) def __repr__(self): return '' % (self.name, self.channel) class Receiver: def __init__(self, session): self.session = session self.next_id = None self._completed = RangedSet() def received(self, cmd): if self.next_id == None: raise Exception("todo") cmd.id = self.next_id self.next_id += 1 def completed(self, cmd): if cmd.id == None: raise ValueError("cannot complete unidentified command") self._completed.add(cmd.id) def known_completed(self, commands): completed = RangedSet() for c in self._completed.ranges: for kc in commands.ranges: if c.lower in kc and c.upper in kc: break else: completed.add_range(c) self._completed = completed class Sender: def __init__(self, session): self.session = session self.next_id = serial(0) self.commands = [] self._completed = RangedSet() def send(self, cmd): ch = self.session.channel if ch is None: raise SessionDetached() cmd.id = self.next_id self.next_id += 1 if self.session.send_id: self.session.send_id = False ch.session_command_point(cmd.id, 0) self.commands.append(cmd) ch.connection.write_op(cmd) def completed(self, commands): idx = 0 while idx < len(self.commands): cmd = self.commands[idx] if cmd.id in commands: del self.commands[idx] else: idx += 1 for range in commands.ranges: self._completed.add(range.lower, range.upper) class Incoming(Queue): def __init__(self, session, destination): Queue.__init__(self) self.session = session self.destination = destination def start(self): self.session.message_set_flow_mode(self.destination, self.session.flow_mode.credit) for unit in self.session.credit_unit.VALUES: self.session.message_flow(self.destination, unit, 0xFFFFFFFFL) def stop(self): self.session.message_cancel(self.destination) self.listen(None) class Delegate: def __init__(self, session): self.session = session #XXX: do something with incoming accepts def message_accept(self, ma): None def execution_result(self, er): future = self.session.results.pop(er.command_id) future.set(er.value) def execution_exception(self, ex): self.session.exceptions.append(ex) class Client(Delegate): def message_transfer(self, cmd): m = Message(cmd.payload) m.headers = cmd.headers m.id = cmd.id messages = self.session.incoming(cmd.destination) messages.put(m) msg.debug("RECV %s", m) return INCOMPLETE qpid-python-0.22/python/qpid/tests/0000755000175000017500000000000012151237730015412 5ustar mbambaqpid-python-0.22/python/qpid/tests/messaging/0000755000175000017500000000000012151237730017367 5ustar mbambaqpid-python-0.22/python/qpid/tests/messaging/__init__.py0000644000175000017500000001326711442465077021522 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import time from math import ceil from qpid.harness import Skipped from qpid.messaging import * from qpid.tests import Test class Base(Test): def setup_connection(self): return None def setup_session(self): return None def setup_sender(self): return None def setup_receiver(self): return None def setup(self): self.test_id = uuid4() self.broker = self.config.broker try: self.conn = self.setup_connection() except ConnectError, e: raise Skipped(e) self.ssn = self.setup_session() self.snd = self.setup_sender() if self.snd is not None: self.snd.durable = self.durable() self.rcv = self.setup_receiver() def teardown(self): if self.conn is not None and self.conn.attached(): self.teardown_connection(self.conn) self.conn = None def teardown_connection(self, conn): conn.close(timeout=self.timeout()) def content(self, base, count = None): if count is None: return "%s[%s]" % (base, self.test_id) else: return "%s[%s, %s]" % (base, count, self.test_id) def message(self, base, count = None, **kwargs): return Message(content=self.content(base, count), **kwargs) def ping(self, ssn): PING_Q = 'ping-queue; {create: always, delete: always}' # send a message sender = ssn.sender(PING_Q, durable=self.durable()) content = self.content("ping") sender.send(content) receiver = ssn.receiver(PING_Q) msg = receiver.fetch(0) ssn.acknowledge() assert msg.content == content, "expected %r, got %r" % (content, msg.content) def drain(self, rcv, limit=None, timeout=0, expected=None, redelivered=False): messages = [] try: while limit is None or len(messages) < limit: messages.append(rcv.fetch(timeout=timeout)) except Empty: pass if expected is not None: self.assertEchos(expected, messages, redelivered) return messages def diff(self, m1, m2, excluded_properties=()): result = {} for attr in ("id", "subject", "user_id", "reply_to", "correlation_id", "durable", "priority", "ttl", "redelivered", "content_type", "content"): a1 = getattr(m1, attr) a2 = getattr(m2, attr) if a1 != a2: result[attr] = (a1, a2) p1 = dict(m1.properties) p2 = dict(m2.properties) for ep in excluded_properties: p1.pop(ep, None) p2.pop(ep, None) if p1 != p2: result["properties"] = (p1, p2) return result def assertEcho(self, msg, echo, redelivered=False): if not isinstance(msg, Message) or not isinstance(echo, Message): if isinstance(msg, Message): msg = msg.content if isinstance(echo, Message): echo = echo.content assert msg == echo, "expected %s, got %s" % (msg, echo) else: delta = self.diff(msg, echo, ("x-amqp-0-10.routing-key",)) mttl, ettl = delta.pop("ttl", (0, 0)) if redelivered: assert echo.redelivered, \ "expected %s to be redelivered: %s" % (msg, echo) if delta.has_key("redelivered"): del delta["redelivered"] assert mttl is not None and ettl is not None, "%s, %s" % (mttl, ettl) assert mttl >= ettl, "%s, %s" % (mttl, ettl) assert not delta, "expected %s, got %s, delta %s" % (msg, echo, delta) def assertEchos(self, msgs, echoes, redelivered=False): assert len(msgs) == len(echoes), "%s, %s" % (msgs, echoes) for m, e in zip(msgs, echoes): self.assertEcho(m, e, redelivered) def assertEmpty(self, rcv): contents = self.drain(rcv) assert len(contents) == 0, 
"%s is supposed to be empty: %s" % (rcv, contents) def assertAvailable(self, rcv, expected=None, lower=None, upper=None): if expected is not None: if lower is not None or upper is not None: raise ValueError("cannot specify lower or upper when specifying expected") lower = expected upper = expected else: if lower is None: lower = int(ceil(rcv.threshold*rcv.capacity)) if upper is None: upper = rcv.capacity p = rcv.available() if upper == lower: assert p == lower, "expected %s, got %s" % (lower, p) else: assert lower <= p <= upper, "expected %s to be in range [%s, %s]" % (p, lower, upper) def sleep(self): time.sleep(self.delay()) def delay(self): return float(self.config.defines.get("delay", "2")) def timeout(self): return float(self.config.defines.get("timeout", "60")) def get_bool(self, name): return self.config.defines.get(name, "false").lower() in ("true", "yes", "1") def durable(self): return self.get_bool("durable") def reconnect(self): return self.get_bool("reconnect") def transport(self): if self.broker.scheme == self.broker.AMQPS: return "ssl" else: return "tcp" def connection_options(self): return {"reconnect": self.reconnect(), "transport": self.transport()} import address, endpoints, message qpid-python-0.22/python/qpid/tests/messaging/endpoints.py0000644000175000017500000011412112005611322021733 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# # setup, usage, teardown, errors(sync), errors(async), stress, soak, # boundary-conditions, config import errno, os, socket, sys, time from qpid import compat from qpid.compat import set from qpid.messaging import * from qpid.messaging.transports import TRANSPORTS from qpid.tests.messaging import Base from threading import Thread class SetupTests(Base): def testEstablish(self): self.conn = Connection.establish(self.broker, **self.connection_options()) self.ping(self.conn.session()) def testOpen(self): self.conn = Connection(self.broker, **self.connection_options()) self.conn.open() self.ping(self.conn.session()) def testOpenReconnectURLs(self): options = self.connection_options() options["reconnect_urls"] = [self.broker, self.broker] self.conn = Connection(self.broker, **options) self.conn.open() self.ping(self.conn.session()) def testTcpNodelay(self): self.conn = Connection.establish(self.broker, tcp_nodelay=True) assert self.conn._driver._transport.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) def testConnectError(self): try: # Specifying port 0 yields a bad address on Windows; port 4 is unassigned self.conn = Connection.establish("localhost:4") assert False, "connect succeeded" except ConnectError, e: assert "refused" in str(e) def testGetError(self): self.conn = Connection("localhost:0") try: self.conn.open() assert False, "connect succeeded" except ConnectError, e: assert self.conn.get_error() == e def use_fds(self): fds = [] try: while True: fds.append(os.open(getattr(os, "devnull", "/dev/null"), os.O_RDONLY)) except OSError, e: if e.errno != errno.EMFILE: raise e else: return fds def testOpenCloseResourceLeaks(self): fds = self.use_fds() try: for i in range(32): if fds: os.close(fds.pop()) for i in xrange(64): conn = Connection.establish(self.broker, **self.connection_options()) conn.close() finally: while fds: os.close(fds.pop()) def testOpenFailResourceLeaks(self): fds = self.use_fds() try: for i in range(32): if fds: os.close(fds.pop()) for i in xrange(64): conn = Connection("localhost:0", **self.connection_options()) # XXX: we need to force a waiter to be created for this test # to work conn._lock.acquire() conn._wait(lambda: False, timeout=0.001) conn._lock.release() try: conn.open() except ConnectError, e: pass finally: while fds: os.close(fds.pop()) def testReconnect(self): options = self.connection_options() real = TRANSPORTS["tcp"] class flaky: def __init__(self, conn, host, port): self.real = real(conn, host, port) self.sent_count = 0 self.recv_count = 0 def fileno(self): return self.real.fileno() def reading(self, reading): return self.real.reading(reading) def writing(self, writing): return self.real.writing(writing) def send(self, bytes): if self.sent_count > 2048: raise socket.error("fake error") n = self.real.send(bytes) self.sent_count += n return n def recv(self, n): if self.recv_count > 2048: return "" bytes = self.real.recv(n) self.recv_count += len(bytes) return bytes def close(self): self.real.close() TRANSPORTS["flaky"] = flaky options["reconnect"] = True options["reconnect_interval"] = 0 options["reconnect_limit"] = 100 options["reconnect_log"] = False options["transport"] = "flaky" self.conn = Connection.establish(self.broker, **options) ssn = self.conn.session() snd = ssn.sender("test-reconnect-queue; {create: always, delete: always}") rcv = ssn.receiver(snd.target) msgs = [self.message("testReconnect", i) for i in range(20)] for m in msgs: snd.send(m) content = set() drained = [] duplicates = [] try: while True: m = rcv.fetch(timeout=0) if 
m.content not in content: content.add(m.content) drained.append(m) else: duplicates.append(m) ssn.acknowledge(m) except Empty: pass # XXX: apparently we don't always get duplicates, should figure out why #assert duplicates, "no duplicates" assert len(drained) == len(msgs) for m, d in zip(msgs, drained): # XXX: we should figure out how to provide proper end to end # redelivered self.assertEcho(m, d, d.redelivered) class ConnectionTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def testCheckClosed(self): assert not self.conn.check_closed() def testSessionAnon(self): ssn1 = self.conn.session() ssn2 = self.conn.session() self.ping(ssn1) self.ping(ssn2) assert ssn1 is not ssn2 def testSessionNamed(self): ssn1 = self.conn.session("one") ssn2 = self.conn.session("two") self.ping(ssn1) self.ping(ssn2) assert ssn1 is not ssn2 assert ssn1 is self.conn.session("one") assert ssn2 is self.conn.session("two") def testDetach(self): ssn = self.conn.session() self.ping(ssn) self.conn.detach() try: self.ping(ssn) assert False, "ping succeeded" except Detached: # this is the expected failure when pinging on a detached # connection pass self.conn.attach() self.ping(ssn) def testClose(self): self.conn.close() assert not self.conn.attached() def testSimultaneousClose(self): ssns = [self.conn.session() for i in range(3)] for s in ssns: for i in range(3): s.receiver("amq.topic") s.sender("amq.topic") def closer(errors): try: self.conn.close() except: _, e, _ = sys.exc_info() errors.append(compat.format_exc(e)) t1_errors = [] t2_errors = [] t1 = Thread(target=lambda: closer(t1_errors)) t2 = Thread(target=lambda: closer(t2_errors)) t1.start() t2.start() t1.join(self.delay()) t2.join(self.delay()) assert not t1_errors, t1_errors[0] assert not t2_errors, t2_errors[0] class hangable: def __init__(self, conn, host, port): self.tcp = TRANSPORTS["tcp"](conn, host, port) self.hung = False def hang(self): self.hung = True def fileno(self): return self.tcp.fileno() def reading(self, reading): if self.hung: return True else: return self.tcp.reading(reading) def writing(self, writing): if self.hung: return False else: return self.tcp.writing(writing) def send(self, bytes): if self.hung: return 0 else: return self.tcp.send(bytes) def recv(self, n): if self.hung: return "" else: return self.tcp.recv(n) def close(self): self.tcp.close() TRANSPORTS["hangable"] = hangable class TimeoutTests(Base): def setup_connection(self): options = self.connection_options() options["transport"] = "hangable" return Connection.establish(self.broker, **options) def setup_session(self): return self.conn.session() def setup_sender(self): return self.ssn.sender("amq.topic") def setup_receiver(self): return self.ssn.receiver("amq.topic; {link: {reliability: unreliable}}") def teardown_connection(self, conn): try: conn.detach(timeout=0) except Timeout: pass def hang(self): self.conn._driver._transport.hang() def timeoutTest(self, method): self.hang() try: method(timeout=self.delay()) assert False, "did not time out" except Timeout: pass def testSenderSync(self): self.snd.send(self.content("testSenderSync"), sync=False) self.timeoutTest(self.snd.sync) def testSenderClose(self): self.snd.send(self.content("testSenderClose"), sync=False) self.timeoutTest(self.snd.close) def testReceiverClose(self): self.timeoutTest(self.rcv.close) def testSessionSync(self): self.snd.send(self.content("testSessionSync"), sync=False) self.timeoutTest(self.ssn.sync) def testSessionClose(self): 
self.timeoutTest(self.ssn.close) def testConnectionDetach(self): self.timeoutTest(self.conn.detach) def testConnectionClose(self): self.timeoutTest(self.conn.close) ACK_QC = 'test-ack-queue; {create: always}' ACK_QD = 'test-ack-queue; {delete: always}' class SessionTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def setup_session(self): return self.conn.session() def testSender(self): snd = self.ssn.sender('test-snd-queue; {create: sender, delete: receiver}', durable=self.durable()) snd2 = self.ssn.sender(snd.target, durable=self.durable()) assert snd is not snd2 snd2.close() content = self.content("testSender") snd.send(content) rcv = self.ssn.receiver(snd.target) msg = rcv.fetch(0) assert msg.content == content self.ssn.acknowledge(msg) def testReceiver(self): rcv = self.ssn.receiver('test-rcv-queue; {create: always}') rcv2 = self.ssn.receiver(rcv.source) assert rcv is not rcv2 rcv2.close() content = self.content("testReceiver") snd = self.ssn.sender(rcv.source, durable=self.durable()) snd.send(content) msg = rcv.fetch(0) assert msg.content == content self.ssn.acknowledge(msg) snd2 = self.ssn.receiver('test-rcv-queue; {delete: always}') def testDetachedReceiver(self): self.conn.detach() rcv = self.ssn.receiver("test-dis-rcv-queue; {create: always, delete: always}") m = self.content("testDetachedReceiver") self.conn.attach() snd = self.ssn.sender("test-dis-rcv-queue") snd.send(m) self.drain(rcv, expected=[m]) def testNextReceiver(self): ADDR = 'test-next-rcv-queue; {create: always, delete: always}' rcv1 = self.ssn.receiver(ADDR, capacity=UNLIMITED) rcv2 = self.ssn.receiver(ADDR, capacity=UNLIMITED) rcv3 = self.ssn.receiver(ADDR, capacity=UNLIMITED) snd = self.ssn.sender(ADDR) msgs = [] for i in range(10): content = self.content("testNextReceiver", i) snd.send(content) msgs.append(content) fetched = [] try: while True: rcv = self.ssn.next_receiver(timeout=self.delay()) assert rcv in (rcv1, rcv2, rcv3) assert rcv.available() > 0 fetched.append(rcv.fetch().content) except Empty: pass assert msgs == fetched, "expecting %s, got %s" % (msgs, fetched) self.ssn.acknowledge() #we set the capacity to 0 to prevent the deletion of the queue - #triggered the deletion policy when the first receiver is closed - #resulting in session exceptions being issued for the remaining #active subscriptions: for r in [rcv1, rcv2, rcv3]: r.capacity = 0 # XXX, we need a convenient way to assert that required queues are # empty on setup, and possibly also to drain queues on teardown def ackTest(self, acker, ack_capacity=None): # send a bunch of messages snd = self.ssn.sender(ACK_QC, durable=self.durable()) contents = [self.content("ackTest", i) for i in range(15)] for c in contents: snd.send(c) # drain the queue, verify the messages are there and then close # without acking rcv = self.ssn.receiver(ACK_QC) self.drain(rcv, expected=contents) self.ssn.close() # drain the queue again, verify that they are all the messages # were requeued, and ack this time before closing self.ssn = self.conn.session() if ack_capacity is not None: self.ssn.ack_capacity = ack_capacity rcv = self.ssn.receiver(ACK_QC) self.drain(rcv, expected=contents) acker(self.ssn) self.ssn.close() # drain the queue a final time and verify that the messages were # dequeued self.ssn = self.conn.session() rcv = self.ssn.receiver(ACK_QD) self.assertEmpty(rcv) def testAcknowledge(self): self.ackTest(lambda ssn: ssn.acknowledge()) def testAcknowledgeAsync(self): self.ackTest(lambda ssn: 
ssn.acknowledge(sync=False)) def testAcknowledgeAsyncAckCap0(self): try: try: self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 0) assert False, "acknowledge shouldn't succeed with ack_capacity of zero" except InsufficientCapacity: pass finally: self.ssn.ack_capacity = UNLIMITED self.drain(self.ssn.receiver(ACK_QD)) self.ssn.acknowledge() def testAcknowledgeAsyncAckCap1(self): self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 1) def testAcknowledgeAsyncAckCap5(self): self.ackTest(lambda ssn: ssn.acknowledge(sync=False), 5) def testAcknowledgeAsyncAckCapUNLIMITED(self): self.ackTest(lambda ssn: ssn.acknowledge(sync=False), UNLIMITED) def testRelease(self): msgs = [self.message("testRelease", i) for i in range(3)] snd = self.ssn.sender("test-release-queue; {create: always, delete: always}") for m in msgs: snd.send(m) rcv = self.ssn.receiver(snd.target) echos = self.drain(rcv, expected=msgs) self.ssn.acknowledge(echos[0]) self.ssn.acknowledge(echos[1], Disposition(RELEASED, set_redelivered=True)) self.ssn.acknowledge(echos[2], Disposition(RELEASED)) self.drain(rcv, limit=1, expected=msgs[1:2], redelivered=True) self.drain(rcv, expected=msgs[2:3]) self.ssn.acknowledge() def testReject(self): msgs = [self.message("testReject", i) for i in range(3)] snd = self.ssn.sender(""" test-reject-queue; { create: always, delete: always, node: { x-declare: { alternate-exchange: 'amq.topic' } } } """) for m in msgs: snd.send(m) rcv = self.ssn.receiver(snd.target) rej = self.ssn.receiver("amq.topic") echos = self.drain(rcv, expected=msgs) self.ssn.acknowledge(echos[0]) self.ssn.acknowledge(echos[1], Disposition(REJECTED)) self.ssn.acknowledge(echos[2], Disposition(REJECTED, code=0, text="test-reject")) self.drain(rej, expected=msgs[1:]) self.ssn.acknowledge() def send(self, ssn, target, base, count=1): snd = ssn.sender(target, durable=self.durable()) messages = [] for i in range(count): c = self.message(base, i) snd.send(c) messages.append(c) snd.close() return messages def txTest(self, commit): TX_Q = 'test-tx-queue; {create: sender, delete: receiver}' TX_Q_COPY = 'test-tx-queue-copy; {create: always, delete: always}' txssn = self.conn.session(transactional=True) messages = self.send(self.ssn, TX_Q, "txTest", 3) txrcv = txssn.receiver(TX_Q) txsnd = txssn.sender(TX_Q_COPY, durable=self.durable()) rcv = self.ssn.receiver(txrcv.source) copy_rcv = self.ssn.receiver(txsnd.target) self.assertEmpty(copy_rcv) for i in range(3): m = txrcv.fetch(0) txsnd.send(m) self.assertEmpty(copy_rcv) txssn.acknowledge() if commit: txssn.commit() self.assertEmpty(rcv) self.drain(copy_rcv, expected=messages) else: txssn.rollback() self.drain(rcv, expected=messages, redelivered=True) self.assertEmpty(copy_rcv) self.ssn.acknowledge() def testCommit(self): self.txTest(True) def testRollback(self): self.txTest(False) def txTestSend(self, commit): TX_SEND_Q = 'test-tx-send-queue; {create: sender, delete: receiver}' txssn = self.conn.session(transactional=True) messages = self.send(txssn, TX_SEND_Q, "txTestSend", 3) rcv = self.ssn.receiver(TX_SEND_Q) self.assertEmpty(rcv) if commit: txssn.commit() self.drain(rcv, expected=messages) self.ssn.acknowledge() else: txssn.rollback() self.assertEmpty(rcv) txssn.commit() self.assertEmpty(rcv) def testCommitSend(self): self.txTestSend(True) def testRollbackSend(self): self.txTestSend(False) def txTestAck(self, commit): TX_ACK_QC = 'test-tx-ack-queue; {create: always}' TX_ACK_QD = 'test-tx-ack-queue; {delete: always}' txssn = self.conn.session(transactional=True) txrcv = 
txssn.receiver(TX_ACK_QC) self.assertEmpty(txrcv) messages = self.send(self.ssn, TX_ACK_QC, "txTestAck", 3) self.drain(txrcv, expected=messages) if commit: txssn.acknowledge() else: txssn.rollback() self.drain(txrcv, expected=messages, redelivered=True) txssn.acknowledge() txssn.rollback() self.drain(txrcv, expected=messages, redelivered=True) txssn.commit() # commit without ack self.assertEmpty(txrcv) txssn.close() txssn = self.conn.session(transactional=True) txrcv = txssn.receiver(TX_ACK_QC) self.drain(txrcv, expected=messages, redelivered=True) txssn.acknowledge() txssn.commit() rcv = self.ssn.receiver(TX_ACK_QD) self.assertEmpty(rcv) txssn.close() self.assertEmpty(rcv) def testCommitAck(self): self.txTestAck(True) def testRollbackAck(self): self.txTestAck(False) def testDoubleCommit(self): ssn = self.conn.session(transactional=True) snd = ssn.sender("amq.direct/doubleCommit") rcv = ssn.receiver("amq.direct/doubleCommit") msgs = [self.message("testDoubleCommit", i, subject="doubleCommit") for i in range(3)] for m in msgs: snd.send(m) ssn.commit() self.drain(rcv, expected=msgs) ssn.acknowledge() ssn.commit() def testClose(self): self.ssn.close() try: self.ping(self.ssn) assert False, "ping succeeded" except Detached: pass RECEIVER_Q = 'test-receiver-queue; {create: always, delete: always}' class ReceiverTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def setup_session(self): return self.conn.session() def setup_sender(self): return self.ssn.sender(RECEIVER_Q) def setup_receiver(self): return self.ssn.receiver(RECEIVER_Q) def send(self, base, count = None, sync=True): content = self.content(base, count) self.snd.send(content, sync=sync) return content def testFetch(self): try: msg = self.rcv.fetch(0) assert False, "unexpected message: %s" % msg except Empty: pass try: start = time.time() msg = self.rcv.fetch(self.delay()) assert False, "unexpected message: %s" % msg except Empty: elapsed = time.time() - start assert elapsed >= self.delay() one = self.send("testFetch", 1) two = self.send("testFetch", 2) three = self.send("testFetch", 3) msg = self.rcv.fetch(0) assert msg.content == one msg = self.rcv.fetch(self.delay()) assert msg.content == two msg = self.rcv.fetch() assert msg.content == three self.ssn.acknowledge() def fetchFromClosedTest(self, entry): entry.close() try: msg = self.rcv.fetch(0) assert False, "unexpected result: %s" % msg except Empty, e: assert False, "unexpected exception: %s" % e except LinkClosed, e: pass def testFetchFromClosedReceiver(self): self.fetchFromClosedTest(self.rcv) def testFetchFromClosedSession(self): self.fetchFromClosedTest(self.ssn) def testFetchFromClosedConnection(self): self.fetchFromClosedTest(self.conn) def fetchFromConcurrentCloseTest(self, entry): def closer(): self.sleep() entry.close() t = Thread(target=closer) t.start() try: msg = self.rcv.fetch() assert False, "unexpected result: %s" % msg except Empty, e: assert False, "unexpected exception: %s" % e except LinkClosed, e: pass t.join() def testFetchFromConcurrentCloseReceiver(self): self.fetchFromConcurrentCloseTest(self.rcv) def testFetchFromConcurrentCloseSession(self): self.fetchFromConcurrentCloseTest(self.ssn) def testFetchFromConcurrentCloseConnection(self): self.fetchFromConcurrentCloseTest(self.conn) def testCapacityIncrease(self): content = self.send("testCapacityIncrease") self.sleep() assert self.rcv.available() == 0 self.rcv.capacity = UNLIMITED self.sleep() assert self.rcv.available() == 1 msg = self.rcv.fetch(0) 
assert msg.content == content assert self.rcv.available() == 0 self.ssn.acknowledge() def testCapacityDecrease(self): self.rcv.capacity = UNLIMITED one = self.send("testCapacityDecrease", 1) self.sleep() assert self.rcv.available() == 1 msg = self.rcv.fetch(0) assert msg.content == one self.rcv.capacity = 0 two = self.send("testCapacityDecrease", 2) self.sleep() assert self.rcv.available() == 0 msg = self.rcv.fetch(0) assert msg.content == two self.ssn.acknowledge() def capacityTest(self, capacity, threshold=None): if threshold is not None: self.rcv.threshold = threshold self.rcv.capacity = capacity self.assertAvailable(self.rcv, 0) for i in range(2*capacity): self.send("capacityTest(%s, %s)" % (capacity, threshold), i, sync=False) self.snd.sync() self.sleep() self.assertAvailable(self.rcv) first = capacity/2 second = capacity - first self.drain(self.rcv, limit = first) self.sleep() self.assertAvailable(self.rcv) self.drain(self.rcv, limit = second) self.sleep() self.assertAvailable(self.rcv) drained = self.drain(self.rcv) assert len(drained) == capacity, "%s, %s" % (len(drained), drained) self.assertAvailable(self.rcv, 0) self.ssn.acknowledge() def testCapacity5(self): self.capacityTest(5) def testCapacity5Threshold1(self): self.capacityTest(5, 1) def testCapacity10(self): self.capacityTest(10) def testCapacity10Threshold1(self): self.capacityTest(10, 1) def testCapacity100(self): self.capacityTest(100) def testCapacity100Threshold1(self): self.capacityTest(100, 1) def testCapacityUNLIMITED(self): self.rcv.capacity = UNLIMITED self.assertAvailable(self.rcv, 0) for i in range(10): self.send("testCapacityUNLIMITED", i) self.sleep() self.assertAvailable(self.rcv, 10) self.drain(self.rcv) self.assertAvailable(self.rcv, 0) self.ssn.acknowledge() def testAvailable(self): self.rcv.capacity = UNLIMITED assert self.rcv.available() == 0 for i in range(3): self.send("testAvailable", i) self.sleep() assert self.rcv.available() == 3 for i in range(3, 10): self.send("testAvailable", i) self.sleep() assert self.rcv.available() == 10 self.drain(self.rcv, limit=3) assert self.rcv.available() == 7 self.drain(self.rcv) assert self.rcv.available() == 0 self.ssn.acknowledge() def testDoubleClose(self): m1 = self.content("testDoubleClose", 1) m2 = self.content("testDoubleClose", 2) snd = self.ssn.sender("""test-double-close; { create: always, delete: sender, node: { type: topic } } """) r1 = self.ssn.receiver(snd.target) r2 = self.ssn.receiver(snd.target) snd.send(m1) self.drain(r1, expected=[m1]) self.drain(r2, expected=[m1]) r1.close() snd.send(m2) self.drain(r2, expected=[m2]) r2.close() # XXX: need testClose def testMode(self): msgs = [self.content("testMode", 1), self.content("testMode", 2), self.content("testMode", 3)] for m in msgs: self.snd.send(m) rb = self.ssn.receiver('test-receiver-queue; {mode: browse}') rc = self.ssn.receiver('test-receiver-queue; {mode: consume}') self.drain(rb, expected=msgs) self.drain(rc, expected=msgs) rb2 = self.ssn.receiver(rb.source) self.assertEmpty(rb2) self.drain(self.rcv, expected=[]) # XXX: need testUnsettled() def unreliabilityTest(self, mode="unreliable"): msgs = [self.message("testUnreliable", i) for i in range(3)] snd = self.ssn.sender("test-unreliability-queue; {create: sender, delete: receiver}") rcv = self.ssn.receiver(snd.target) for m in msgs: snd.send(m) # close without ack on reliable receiver, messages should be requeued ssn = self.conn.session() rrcv = ssn.receiver("test-unreliability-queue") self.drain(rrcv, expected=msgs) ssn.close() # close without 
ack on unreliable receiver, messages should not be requeued ssn = self.conn.session() urcv = ssn.receiver("test-unreliability-queue; {link: {reliability: %s}}" % mode) self.drain(urcv, expected=msgs, redelivered=True) ssn.close() self.assertEmpty(rcv) def testUnreliable(self): self.unreliabilityTest(mode="unreliable") def testAtMostOnce(self): self.unreliabilityTest(mode="at-most-once") class AddressTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def setup_session(self): return self.conn.session() def badOption(self, options, error): try: self.ssn.sender("test-bad-options-snd; %s" % options) assert False except InvalidOption, e: assert "error in options: %s" % error == str(e), e try: self.ssn.receiver("test-bad-options-rcv; %s" % options) assert False except InvalidOption, e: assert "error in options: %s" % error == str(e), e def testIllegalKey(self): self.badOption("{create: always, node: " "{this-property-does-not-exist: 3}}", "node: this-property-does-not-exist: " "illegal key") def testWrongValue(self): self.badOption("{create: asdf}", "create: asdf not in " "('always', 'sender', 'receiver', 'never')") def testWrongType1(self): self.badOption("{node: asdf}", "node: asdf is not a map") def testWrongType2(self): self.badOption("{node: {durable: []}}", "node: durable: [] is not a bool") def testCreateQueue(self): snd = self.ssn.sender("test-create-queue; {create: always, delete: always, " "node: {type: queue, durable: False, " "x-declare: {auto_delete: true}}}") content = self.content("testCreateQueue") snd.send(content) rcv = self.ssn.receiver("test-create-queue") self.drain(rcv, expected=[content]) def createExchangeTest(self, props=""): addr = """test-create-exchange; { create: always, delete: always, node: { type: topic, durable: False, x-declare: {auto_delete: true, %s} } }""" % props snd = self.ssn.sender(addr) snd.send("ping") rcv1 = self.ssn.receiver("test-create-exchange/first") rcv2 = self.ssn.receiver("test-create-exchange/first") rcv3 = self.ssn.receiver("test-create-exchange/second") for r in (rcv1, rcv2, rcv3): try: r.fetch(0) assert False except Empty: pass msg1 = Message(self.content("testCreateExchange", 1), subject="first") msg2 = Message(self.content("testCreateExchange", 2), subject="second") snd.send(msg1) snd.send(msg2) self.drain(rcv1, expected=[msg1.content]) self.drain(rcv2, expected=[msg1.content]) self.drain(rcv3, expected=[msg2.content]) def testCreateExchange(self): self.createExchangeTest() def testCreateExchangeDirect(self): self.createExchangeTest("type: direct") def testCreateExchangeTopic(self): self.createExchangeTest("type: topic") def testDeleteBySender(self): snd = self.ssn.sender("test-delete; {create: always}") snd.send("ping") snd.close() snd = self.ssn.sender("test-delete; {delete: always}") snd.send("ping") snd.close() try: self.ssn.sender("test-delete") except NotFound, e: assert "no such queue" in str(e) def testDeleteByReceiver(self): rcv = self.ssn.receiver("test-delete; {create: always, delete: always}") try: rcv.fetch(0) except Empty: pass rcv.close() try: self.ssn.receiver("test-delete") assert False except NotFound, e: assert "no such queue" in str(e) def testDeleteSpecial(self): snd = self.ssn.sender("amq.topic; {delete: always}") snd.send("asdf") try: snd.close() assert False, "successfully deleted amq.topic" except SessionError, e: assert e.code == 530 # XXX: need to figure out close after error self.conn._remove_session(self.ssn) def testNodeBindingsQueue(self): snd = 
self.ssn.sender(""" test-node-bindings-queue; { create: always, delete: always, node: { x-bindings: [{exchange: "amq.topic", key: "a.#"}, {exchange: "amq.direct", key: "b"}, {exchange: "amq.topic", key: "c.*"}] } } """) snd.send("one") snd_a = self.ssn.sender("amq.topic/a.foo") snd_b = self.ssn.sender("amq.direct/b") snd_c = self.ssn.sender("amq.topic/c.bar") snd_a.send("two") snd_b.send("three") snd_c.send("four") rcv = self.ssn.receiver("test-node-bindings-queue") self.drain(rcv, expected=["one", "two", "three", "four"]) def testNodeBindingsTopic(self): rcv = self.ssn.receiver("test-node-bindings-topic-queue; {create: always, delete: always}") rcv_a = self.ssn.receiver("test-node-bindings-topic-queue-a; {create: always, delete: always}") rcv_b = self.ssn.receiver("test-node-bindings-topic-queue-b; {create: always, delete: always}") rcv_c = self.ssn.receiver("test-node-bindings-topic-queue-c; {create: always, delete: always}") snd = self.ssn.sender(""" test-node-bindings-topic; { create: always, delete: always, node: { type: topic, x-bindings: [{queue: test-node-bindings-topic-queue, key: "#"}, {queue: test-node-bindings-topic-queue-a, key: "a.#"}, {queue: test-node-bindings-topic-queue-b, key: "b"}, {queue: test-node-bindings-topic-queue-c, key: "c.*"}] } } """) m1 = Message("one") m2 = Message(subject="a.foo", content="two") m3 = Message(subject="b", content="three") m4 = Message(subject="c.bar", content="four") snd.send(m1) snd.send(m2) snd.send(m3) snd.send(m4) self.drain(rcv, expected=[m1, m2, m3, m4]) self.drain(rcv_a, expected=[m2]) self.drain(rcv_b, expected=[m3]) self.drain(rcv_c, expected=[m4]) def testLinkBindings(self): m_a = self.message("testLinkBindings", 1, subject="a") m_b = self.message("testLinkBindings", 2, subject="b") self.ssn.sender("test-link-bindings-queue; {create: always, delete: always}") snd = self.ssn.sender("amq.topic") snd.send(m_a) snd.send(m_b) snd.close() rcv = self.ssn.receiver("test-link-bindings-queue") self.assertEmpty(rcv) snd = self.ssn.sender(""" amq.topic; { link: { x-bindings: [{queue: test-link-bindings-queue, key: a}] } } """) snd.send(m_a) snd.send(m_b) self.drain(rcv, expected=[m_a]) rcv.close() rcv = self.ssn.receiver(""" test-link-bindings-queue; { link: { x-bindings: [{exchange: "amq.topic", key: b}] } } """) snd.send(m_a) snd.send(m_b) self.drain(rcv, expected=[m_a, m_b]) def testSubjectOverride(self): snd = self.ssn.sender("amq.topic/a") rcv_a = self.ssn.receiver("amq.topic/a") rcv_b = self.ssn.receiver("amq.topic/b") m1 = self.content("testSubjectOverride", 1) m2 = self.content("testSubjectOverride", 2) snd.send(m1) snd.send(Message(subject="b", content=m2)) self.drain(rcv_a, expected=[m1]) self.drain(rcv_b, expected=[m2]) def testSubjectDefault(self): m1 = self.content("testSubjectDefault", 1) m2 = self.content("testSubjectDefault", 2) snd = self.ssn.sender("amq.topic/a") rcv = self.ssn.receiver("amq.topic") snd.send(m1) snd.send(Message(subject="b", content=m2)) e1 = rcv.fetch(timeout=0) e2 = rcv.fetch(timeout=0) assert e1.subject == "a", "subject: %s" % e1.subject assert e2.subject == "b", "subject: %s" % e2.subject self.assertEmpty(rcv) def doReliabilityTest(self, reliability, messages, expected): snd = self.ssn.sender("amq.topic") rcv = self.ssn.receiver("amq.topic; {link: {reliability: %s}}" % reliability) for m in messages: snd.send(m) self.conn.detach() self.conn.attach() self.drain(rcv, expected=expected) def testReliabilityUnreliable(self): msgs = [self.message("testReliabilityUnreliable", i) for i in range(3)] 
self.doReliabilityTest("unreliable", msgs, []) def testReliabilityAtLeastOnce(self): msgs = [self.message("testReliabilityAtLeastOnce", i) for i in range(3)] self.doReliabilityTest("at-least-once", msgs, msgs) def testLinkName(self): msgs = [self.message("testLinkName", i) for i in range(3)] snd = self.ssn.sender("amq.topic") trcv = self.ssn.receiver("amq.topic; {link: {name: test-link-name}}") qrcv = self.ssn.receiver("test-link-name") for m in msgs: snd.send(m) self.drain(qrcv, expected=msgs) def testAssert1(self): try: snd = self.ssn.sender("amq.topic; {assert: always, node: {type: queue}}") assert 0, "assertion failed to trigger" except AssertionFailed, e: pass def testAssert2(self): snd = self.ssn.sender("amq.topic; {assert: always}") NOSUCH_Q = "this-queue-should-not-exist" UNPARSEABLE_ADDR = "name/subject; {bad options" UNLEXABLE_ADDR = "\0x0\0x1\0x2\0x3" class AddressErrorTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def setup_session(self): return self.conn.session() def senderErrorTest(self, addr, exc, check=lambda e: True): try: self.ssn.sender(addr, durable=self.durable()) assert False, "sender creation succeeded" except exc, e: assert check(e), "unexpected error: %s" % compat.format_exc(e) def receiverErrorTest(self, addr, exc, check=lambda e: True): try: self.ssn.receiver(addr) assert False, "receiver creation succeeded" except exc, e: assert check(e), "unexpected error: %s" % compat.format_exc(e) def testNoneTarget(self): self.senderErrorTest(None, MalformedAddress) def testNoneSource(self): self.receiverErrorTest(None, MalformedAddress) def testNoTarget(self): self.senderErrorTest(NOSUCH_Q, NotFound, lambda e: NOSUCH_Q in str(e)) def testNoSource(self): self.receiverErrorTest(NOSUCH_Q, NotFound, lambda e: NOSUCH_Q in str(e)) def testUnparseableTarget(self): self.senderErrorTest(UNPARSEABLE_ADDR, MalformedAddress, lambda e: "expecting COLON" in str(e)) def testUnparseableSource(self): self.receiverErrorTest(UNPARSEABLE_ADDR, MalformedAddress, lambda e: "expecting COLON" in str(e)) def testUnlexableTarget(self): self.senderErrorTest(UNLEXABLE_ADDR, MalformedAddress, lambda e: "unrecognized characters" in str(e)) def testUnlexableSource(self): self.receiverErrorTest(UNLEXABLE_ADDR, MalformedAddress, lambda e: "unrecognized characters" in str(e)) def testInvalidMode(self): self.receiverErrorTest('name; {mode: "this-is-a-bad-receiver-mode"}', InvalidOption, lambda e: "not in ('browse', 'consume')" in str(e)) SENDER_Q = 'test-sender-q; {create: always, delete: always}' class SenderTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def setup_session(self): return self.conn.session() def setup_sender(self): return self.ssn.sender(SENDER_Q) def setup_receiver(self): return self.ssn.receiver(SENDER_Q) def checkContent(self, content): self.snd.send(content) msg = self.rcv.fetch(0) assert msg.content == content out = Message(content) self.snd.send(out) echo = self.rcv.fetch(0) assert out.content == echo.content assert echo.content == msg.content self.ssn.acknowledge() def testSendString(self): self.checkContent(self.content("testSendString")) def testSendList(self): self.checkContent(["testSendList", 1, 3.14, self.test_id]) def testSendMap(self): self.checkContent({"testSendMap": self.test_id, "pie": "blueberry", "pi": 3.14}) def asyncTest(self, capacity): self.snd.capacity = capacity msgs = [self.content("asyncTest", i) for i in range(15)] for m in msgs: 
self.snd.send(m, sync=False) self.drain(self.rcv, timeout=self.delay(), expected=msgs) self.ssn.acknowledge() def testSendAsyncCapacity0(self): try: self.asyncTest(0) assert False, "send shouldn't succeed with zero capacity" except InsufficientCapacity: # this is expected pass def testSendAsyncCapacity1(self): self.asyncTest(1) def testSendAsyncCapacity5(self): self.asyncTest(5) def testSendAsyncCapacityUNLIMITED(self): self.asyncTest(UNLIMITED) def testCapacityTimeout(self): self.snd.capacity = 1 msgs = [] caught = False while len(msgs) < 100: m = self.content("testCapacity", len(msgs)) try: self.snd.send(m, sync=False, timeout=0) msgs.append(m) except InsufficientCapacity: caught = True break self.snd.sync() self.drain(self.rcv, expected=msgs) self.ssn.acknowledge() assert caught, "did not exceed capacity" def testEINTR(self): m1 = self.content("testEINTR", 0) m2 = self.content("testEINTR", 1) self.snd.send(m1, timeout=self.timeout()) try: os.setuid(500) assert False, "setuid should fail" except: pass self.snd.send(m2, timeout=self.timeout()) qpid-python-0.22/python/qpid/tests/messaging/message.py0000644000175000017500000001060011442226077021366 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
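#
# Illustrative sketch (not part of the original test suite): a minimal echo
# round trip with the qpid.messaging API that the MessageEchoTests below
# exercise.  It assumes a broker is reachable at "localhost"; the queue name
# "example-echo-queue" and this helper are made up for illustration and the
# function is never invoked by the tests.
#
def _example_message_echo(broker="localhost"):
  from qpid.messaging import Connection, Message
  conn = Connection.establish(broker)
  try:
    ssn = conn.session()
    addr = 'example-echo-queue; {create: always, delete: always}'
    snd = ssn.sender(addr)
    rcv = ssn.receiver(addr)
    msg = Message({"pi": 3.14, "pie": "blueberry"})
    assert msg.content_type == "amqp/map"  # inferred from the python type of the content
    snd.send(msg)
    echo = rcv.fetch(timeout=10)
    assert echo.content == msg.content
    ssn.acknowledge(echo)
    return echo
  finally:
    conn.close()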
# from qpid.messaging import * from qpid.tests.messaging import Base class MessageTests(Base): def testCreateString(self): m = Message("string") assert m.content == "string" assert m.content_type is None def testCreateUnicode(self): m = Message(u"unicode") assert m.content == u"unicode" assert m.content_type == "text/plain" def testCreateMap(self): m = Message({}) assert m.content == {} assert m.content_type == "amqp/map" def testCreateList(self): m = Message([]) assert m.content == [] assert m.content_type == "amqp/list" def testContentTypeOverride(self): m = Message() m.content_type = "text/html; charset=utf8" m.content = u"" assert m.content_type == "text/html; charset=utf8" ECHO_Q = 'test-message-echo-queue; {create: always, delete: always}' class MessageEchoTests(Base): def setup_connection(self): return Connection.establish(self.broker, **self.connection_options()) def setup_session(self): return self.conn.session() def setup_sender(self): return self.ssn.sender(ECHO_Q) def setup_receiver(self): return self.ssn.receiver(ECHO_Q) def check(self, msg): self.snd.send(msg) echo = self.rcv.fetch(0) self.assertEcho(msg, echo) self.ssn.acknowledge(echo) def testStringContent(self): self.check(Message("string")) def testUnicodeContent(self): self.check(Message(u"unicode")) TEST_MAP = {"key1": "string", "key2": u"unicode", "key3": 3, "key4": -3, "key5": 3.14, "key6": -3.14, "key7": ["one", 2, 3.14], "key8": [], "key9": {"sub-key0": 3}, "key10": True, "key11": False, "x-amqp-0-10.app-id": "test-app-id", "x-amqp-0-10.content-encoding": "test-content-encoding"} def testMapContent(self): self.check(Message(MessageEchoTests.TEST_MAP)) def testListContent(self): self.check(Message([])) self.check(Message([1, 2, 3])) self.check(Message(["one", 2, 3.14, {"four": 4}])) def testProperties(self): msg = Message() msg.subject = "subject" msg.correlation_id = str(self.test_id) msg.durable = True msg.priority = 7 msg.ttl = 60 msg.properties = MessageEchoTests.TEST_MAP msg.reply_to = "reply-address" self.check(msg) def testContentTypeUnknown(self): msg = Message(content_type = "this-content-type-does-not-exist") self.check(msg) def testTextPlain(self): self.check(Message(content_type="text/plain", content="asdf")) def testTextPlainEmpty(self): self.check(Message(content_type="text/plain")) def check_rt(self, addr, expected=None): if expected is None: expected = addr msg = Message(reply_to=addr) self.snd.send(msg) echo = self.rcv.fetch(0) assert echo.reply_to == expected, echo.reply_to self.ssn.acknowledge(echo) def testReplyTo(self): self.check_rt("name") def testReplyToQueue(self): self.check_rt("name; {node: {type: queue}}", "name") def testReplyToQueueSubject(self): self.check_rt("name/subject; {node: {type: queue}}", "name") def testReplyToTopic(self): self.check_rt("name; {node: {type: topic}}") def testReplyToTopicSubject(self): self.check_rt("name/subject; {node: {type: topic}}") def testBooleanEncoding(self): msg = Message({"true": True, "false": False}) self.snd.send(msg) echo = self.rcv.fetch(0) self.assertEcho(msg, echo) t = echo.content["true"] f = echo.content["false"] assert isinstance(t, bool), t assert isinstance(f, bool), f qpid-python-0.22/python/qpid/tests/messaging/address.py0000644000175000017500000002434011513625147021375 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. 
The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from qpid.tests import Test from qpid.messaging.address import lex, parse, ParseError, EOF, ID, NUMBER, \ SYM, WSPACE, LEXER from qpid.lexer import Token from qpid.harness import Skipped from qpid.tests.parser import ParserBase def indent(st): return " " + st.replace("\n", "\n ") def pprint_address(name, subject, options): return "NAME: %s\nSUBJECT: %s\nOPTIONS: %s" % \ (pprint(name), pprint(subject), pprint(options)) def pprint(o): if isinstance(o, dict): return pprint_map(o) elif isinstance(o, list): return pprint_list(o) elif isinstance(o, basestring): return pprint_string(o) else: return repr(o) def pprint_map(m): items = ["%s: %s" % (pprint(k), pprint(v)) for k, v in m.items()] items.sort() return pprint_items("{", items, "}") def pprint_list(l): return pprint_items("[", [pprint(x) for x in l], "]") def pprint_items(start, items, end): if items: return "%s\n%s\n%s" % (start, ",\n".join([indent(i) for i in items]), end) else: return "%s%s" % (start, end) def pprint_string(s): result = "'" for c in s: if c == "'": result += "\\'" elif c == "\n": result += "\\n" elif ord(c) >= 0x80: result += "\\u%04x" % ord(c) else: result += c result += "'" return result class AddressTests(ParserBase, Test): EXCLUDE = (WSPACE, EOF) def fields(self, line, n): result = line.split(":", n - 1) result.extend([None]*(n - len(result))) return result def call(self, parser, mode, input): try: from subprocess import Popen, PIPE, STDOUT po = Popen([parser, mode], stdin=PIPE, stdout=PIPE, stderr=STDOUT) except ImportError, e: raise Skipped("%s" % e) except OSError, e: raise Skipped("%s: %s" % (e, parser)) out, _ = po.communicate(input=input) return out def parser(self): return self.config.defines.get("address.parser") def do_lex(self, st): parser = self.parser() if parser: out = self.call(parser, "lex", st) lines = out.split("\n") toks = [] for line in lines: if line.strip(): name, position, value = self.fields(line, 3) toks.append(Token(LEXER.type(name), value, position, st)) return toks else: return lex(st) def do_parse(self, st): return parse(st) def valid(self, addr, name=None, subject=None, options=None): parser = self.parser() if parser: got = self.call(parser, "parse", addr) expected = "%s\n" % pprint_address(name, subject, options) assert expected == got, "expected\n%s\ngot\n%s" % (expected, got) else: ParserBase.valid(self, addr, (name, subject, options)) def invalid(self, addr, error=None): parser = self.parser() if parser: got = self.call(parser, "parse", addr) expected = "ERROR: %s\n" % error assert expected == got, "expected %r, got %r" % (expected, got) else: ParserBase.invalid(self, addr, error) def testDashInId1(self): self.lex("foo-bar", ID) def testDashInId2(self): self.lex("foo-3", ID) def testDashAlone1(self): self.lex("foo - bar", ID, SYM, ID) def testDashAlone2(self): self.lex("foo - 3", ID, SYM, NUMBER) def testLeadingDash(self): self.lex("-foo", SYM, ID) def testTrailingDash(self): self.lex("foo-", ID, SYM) def 
testNegativeNum(self): self.lex("-3", NUMBER) def testIdNum(self): self.lex("id1", ID) def testIdSpaceNum(self): self.lex("id 1", ID, NUMBER) def testHash(self): self.valid("foo/bar.#", "foo", "bar.#") def testStar(self): self.valid("foo/bar.*", "foo", "bar.*") def testColon(self): self.valid("foo.bar/baz.qux:moo:arf", "foo.bar", "baz.qux:moo:arf") def testOptions(self): self.valid("foo.bar/baz.qux:moo:arf; {key: value}", "foo.bar", "baz.qux:moo:arf", {"key": "value"}) def testOptionsTrailingComma(self): self.valid("name/subject; {key: value,}", "name", "subject", {"key": "value"}) def testOptionsNone(self): self.valid("name/subject; {key: None}", "name", "subject", {"key": None}) def testSemiSubject(self): self.valid("foo.bar/'baz.qux;moo:arf'; {key: value}", "foo.bar", "baz.qux;moo:arf", {"key": "value"}) def testCommaSubject(self): self.valid("foo.bar/baz.qux.{moo,arf}", "foo.bar", "baz.qux.{moo,arf}") def testCommaSubjectOptions(self): self.valid("foo.bar/baz.qux.{moo,arf}; {key: value}", "foo.bar", "baz.qux.{moo,arf}", {"key": "value"}) def testUnbalanced(self): self.valid("foo.bar/baz.qux.{moo,arf; {key: value}", "foo.bar", "baz.qux.{moo,arf", {"key": "value"}) def testSlashQuote(self): self.valid("foo.bar\\/baz.qux.{moo,arf; {key: value}", "foo.bar/baz.qux.{moo,arf", None, {"key": "value"}) def testSlashHexEsc1(self): self.valid("foo.bar\\x00baz.qux.{moo,arf; {key: value}", "foo.bar\x00baz.qux.{moo,arf", None, {"key": "value"}) def testSlashHexEsc2(self): self.valid("foo.bar\\xffbaz.qux.{moo,arf; {key: value}", "foo.bar\xffbaz.qux.{moo,arf", None, {"key": "value"}) def testSlashHexEsc3(self): self.valid("foo.bar\\xFFbaz.qux.{moo,arf; {key: value}", "foo.bar\xFFbaz.qux.{moo,arf", None, {"key": "value"}) def testSlashUnicode1(self): self.valid("foo.bar\\u1234baz.qux.{moo,arf; {key: value}", u"foo.bar\u1234baz.qux.{moo,arf", None, {"key": "value"}) def testSlashUnicode2(self): self.valid("foo.bar\\u0000baz.qux.{moo,arf; {key: value}", u"foo.bar\u0000baz.qux.{moo,arf", None, {"key": "value"}) def testSlashUnicode3(self): self.valid("foo.bar\\uffffbaz.qux.{moo,arf; {key: value}", u"foo.bar\uffffbaz.qux.{moo,arf", None, {"key": "value"}) def testSlashUnicode4(self): self.valid("foo.bar\\uFFFFbaz.qux.{moo,arf; {key: value}", u"foo.bar\uFFFFbaz.qux.{moo,arf", None, {"key": "value"}) def testNoName(self): self.invalid("; {key: value}", "unexpected token SEMI(;) line:1,0:; {key: value}") def testEmpty(self): self.invalid("", "unexpected token EOF line:1,0:") def testNoNameSlash(self): self.invalid("/asdf; {key: value}", "unexpected token SLASH(/) line:1,0:/asdf; {key: value}") def testBadOptions1(self): self.invalid("name/subject; {", "expecting (NUMBER, STRING, ID, LBRACE, LBRACK, RBRACE), " "got EOF line:1,15:name/subject; {") def testBadOptions2(self): self.invalid("name/subject; { 3", "expecting COLON, got EOF " "line:1,17:name/subject; { 3") def testBadOptions3(self): self.invalid("name/subject; { key:", "expecting (NUMBER, STRING, ID, LBRACE, LBRACK), got EOF " "line:1,20:name/subject; { key:") def testBadOptions4(self): self.invalid("name/subject; { key: value", "expecting (COMMA, RBRACE), got EOF " "line:1,26:name/subject; { key: value") def testBadOptions5(self): self.invalid("name/subject; { key: value asdf", "expecting (COMMA, RBRACE), got ID(asdf) " "line:1,27:name/subject; { key: value asdf") def testBadOptions6(self): self.invalid("name/subject; { key: value,", "expecting (NUMBER, STRING, ID, LBRACE, LBRACK, RBRACE), got EOF " "line:1,27:name/subject; { key: value,") def 
testBadOptions7(self): self.invalid("name/subject; { key: value } asdf", "expecting EOF, got ID(asdf) " "line:1,29:name/subject; { key: value } asdf") def testList1(self): self.valid("name/subject; { key: [] }", "name", "subject", {"key": []}) def testList2(self): self.valid("name/subject; { key: ['one'] }", "name", "subject", {"key": ['one']}) def testList3(self): self.valid("name/subject; { key: [1, 2, 3] }", "name", "subject", {"key": [1, 2, 3]}) def testList4(self): self.valid("name/subject; { key: [1, [2, 3], 4] }", "name", "subject", {"key": [1, [2, 3], 4]}) def testBadList1(self): self.invalid("name/subject; { key: [ }", "expecting (NUMBER, STRING, ID, LBRACE, LBRACK), " "got RBRACE(}) line:1,23:name/subject; { key: [ }") def testBadList2(self): self.invalid("name/subject; { key: [ 1 }", "expecting (COMMA, RBRACK), " "got RBRACE(}) line:1,25:name/subject; { key: [ 1 }") def testBadList3(self): self.invalid("name/subject; { key: [ 1 2 }", "expecting (COMMA, RBRACK), " "got NUMBER(2) line:1,25:name/subject; { key: [ 1 2 }") def testBadList4(self): self.invalid("name/subject; { key: [ 1 2 ] }", "expecting (COMMA, RBRACK), " "got NUMBER(2) line:1,25:name/subject; { key: [ 1 2 ] }") def testMap1(self): self.valid("name/subject; { 'key': value }", "name", "subject", {"key": "value"}) def testMap2(self): self.valid("name/subject; { 1: value }", "name", "subject", {1: "value"}) def testMap3(self): self.valid('name/subject; { "foo.bar": value }', "name", "subject", {"foo.bar": "value"}) def testBoolean(self): self.valid("name/subject; { true1: True, true2: true, " "false1: False, false2: false }", "name", "subject", {"true1": True, "true2": True, "false1": False, "false2": False}) qpid-python-0.22/python/qpid/tests/util.py0000644000175000017500000000414112046451714016744 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
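#
# Illustrative sketch (not part of the original test suite): how
# get_client_properties_with_defaults, exercised by UtilTest below, merges
# caller-supplied client properties with the spec-recommended defaults.
# The "mykey" entry is made up for illustration; this helper is never
# invoked by the tests.
#
def _example_client_properties():
  from qpid.util import get_client_properties_with_defaults
  props = get_client_properties_with_defaults(
    provided_client_properties={"mykey": "myvalue"})
  # the default properties are always present ...
  assert "product" in props and "version" in props and "platform" in props
  # ... and provided entries are merged in (and may override a default)
  assert props["mykey"] == "myvalue"
  return props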
# from unittest import TestCase from qpid.util import get_client_properties_with_defaults class UtilTest (TestCase): def test_get_spec_recommended_client_properties(self): client_properties = get_client_properties_with_defaults(provided_client_properties={"mykey":"myvalue"}) self.assertTrue("product" in client_properties) self.assertTrue("version" in client_properties) self.assertTrue("platform" in client_properties) def test_get_client_properties_with_provided_value(self): client_properties = get_client_properties_with_defaults(provided_client_properties={"mykey":"myvalue"}) self.assertTrue("product" in client_properties) self.assertTrue("mykey" in client_properties) self.assertEqual("myvalue", client_properties["mykey"]) def test_get_client_properties_with_no_provided_values(self): client_properties = get_client_properties_with_defaults(provided_client_properties=None) self.assertTrue("product" in client_properties) client_properties = get_client_properties_with_defaults() self.assertTrue("product" in client_properties) def test_get_client_properties_with_provided_value_that_overrides_default(self): client_properties = get_client_properties_with_defaults(provided_client_properties={"version":"myversion"}) self.assertEqual("myversion", client_properties["version"]) qpid-python-0.22/python/qpid/tests/__init__.py0000644000175000017500000000312412046451714017526 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # class Test: def __init__(self, name): self.name = name def configure(self, config): self.config = config # API Tests import qpid.tests.framing import qpid.tests.mimetype import qpid.tests.messaging # Legacy Tests import qpid.tests.codec import qpid.tests.queue import qpid.tests.datatypes import qpid.tests.connection import qpid.tests.spec010 import qpid.tests.codec010 import qpid.tests.util class TestTestsXXX(Test): def testFoo(self): print "this test has output" def testBar(self): print "this test "*8 print "has"*10 print "a"*75 print "lot of"*10 print "output"*10 def testQux(self): import sys sys.stdout.write("this test has output with no newline") def testQuxFail(self): import sys sys.stdout.write("this test has output with no newline") fdsa qpid-python-0.22/python/qpid/tests/mimetype.py0000644000175000017500000000362011307763616017627 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from qpid.tests import Test from qpid.mimetype import lex, parse, ParseError, EOF, WSPACE from parser import ParserBase class MimeTypeTests(ParserBase, Test): EXCLUDE = (WSPACE, EOF) def do_lex(self, st): return lex(st) def do_parse(self, st): return parse(st) def valid(self, addr, type=None, subtype=None, parameters=None): ParserBase.valid(self, addr, (type, subtype, parameters)) def testTypeOnly(self): self.invalid("type", "expecting SLASH, got EOF line:1,4:type") def testTypeSubtype(self): self.valid("type/subtype", "type", "subtype", []) def testTypeSubtypeParam(self): self.valid("type/subtype ; name=value", "type", "subtype", [("name", "value")]) def testTypeSubtypeParamComment(self): self.valid("type/subtype ; name(This is a comment.)=value", "type", "subtype", [("name", "value")]) def testMultipleParams(self): self.valid("type/subtype ; name1=value1 ; name2=value2", "type", "subtype", [("name1", "value1"), ("name2", "value2")]) def testCaseInsensitivity(self): self.valid("Type/Subtype", "type", "subtype", []) qpid-python-0.22/python/qpid/tests/spec010.py0000644000175000017500000000522611333034372017142 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
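#
# Illustrative sketch (not part of the original test suite): encoding an
# AMQP 0-10 compound with StringCodec and reading it back, the same round
# trip SpecTest.encdec below performs.  The field values are made up for
# illustration; this helper is never invoked by the tests.
#
def _example_compound_roundtrip():
  from qpid.codec010 import StringCodec
  from qpid.ops import MessageProperties, ReplyTo
  props = MessageProperties(content_length=11,
                            reply_to=ReplyTo(exchange="amq.direct",
                                             routing_key="example-key"))
  sc = StringCodec()
  sc.write_compound(props)     # sc.encoded now holds the wire bytes
  decoded = sc.read_compound(MessageProperties)
  assert decoded.reply_to.routing_key == "example-key"
  return decoded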
# import os, tempfile, shutil, stat from unittest import TestCase from qpid.codec010 import Codec, StringCodec from qpid.ops import * class SpecTest(TestCase): def testSessionHeader(self): sc = StringCodec() sc.write_compound(Header(sync=True)) assert sc.encoded == "\x01\x01" sc = StringCodec() sc.write_compound(Header(sync=False)) assert sc.encoded == "\x01\x00" def encdec(self, value): sc = StringCodec() sc.write_compound(value) decoded = sc.read_compound(value.__class__) return decoded def testMessageProperties(self): props = MessageProperties(content_length=3735928559L, reply_to=ReplyTo(exchange="the exchange name", routing_key="the routing key")) dec = self.encdec(props) assert props.content_length == dec.content_length assert props.reply_to.exchange == dec.reply_to.exchange assert props.reply_to.routing_key == dec.reply_to.routing_key def testMessageSubscribe(self): cmd = MessageSubscribe(exclusive=True, destination="this is a test") dec = self.encdec(cmd) assert cmd.exclusive == dec.exclusive assert cmd.destination == dec.destination def testXid(self): sc = StringCodec() xid = Xid(format=0, global_id="gid", branch_id="bid") sc.write_compound(xid) assert sc.encoded == '\x00\x00\x00\x10\x06\x04\x07\x00\x00\x00\x00\x00\x03gid\x03bid' dec = sc.read_compound(Xid) assert xid.__dict__ == dec.__dict__ # def testLoadReadOnly(self): # spec = "amqp.0-10-qpid-errata.xml" # f = testrunner.get_spec_file(spec) # dest = tempfile.mkdtemp() # shutil.copy(f, dest) # shutil.copy(os.path.join(os.path.dirname(f), "amqp.0-10.dtd"), dest) # os.chmod(dest, stat.S_IRUSR | stat.S_IXUSR) # fname = os.path.join(dest, spec) # load(fname) # assert not os.path.exists("%s.pcl" % fname) qpid-python-0.22/python/qpid/tests/parser.py0000644000175000017500000000255311307763616017276 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from qpid.parser import ParseError class ParserBase: def lex(self, addr, *types): toks = [t.type for t in self.do_lex(addr) if t.type not in self.EXCLUDE] assert list(types) == toks, "expected %s, got %s" % (types, toks) def valid(self, addr, expected): got = self.do_parse(addr) assert expected == got, "expected %s, got %s" % (expected, got) def invalid(self, addr, error=None): try: p = self.do_parse(addr) assert False, "invalid address parsed: %s" % p except ParseError, e: assert error == str(e), "expected %r, got %r" % (error, str(e)) qpid-python-0.22/python/qpid/tests/connection.py0000644000175000017500000001332311336673153020134 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. 
The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import time from threading import * from unittest import TestCase from qpid.util import connect, listen from qpid.connection import * from qpid.datatypes import Message from qpid.delegates import Server from qpid.queue import Queue from qpid.session import Delegate from qpid.ops import QueueQueryResult PORT = 1234 class TestServer: def __init__(self, queue): self.queue = queue def connection(self, connection): return Server(connection, delegate=self.session) def session(self, session): session.auto_sync = False return TestSession(session, self.queue) class TestSession(Delegate): def __init__(self, session, queue): self.session = session self.queue = queue def execution_sync(self, es): pass def queue_query(self, qq): return QueueQueryResult(qq.queue) def message_transfer(self, cmd): if cmd.destination == "echo": m = Message(cmd.payload) m.headers = cmd.headers self.session.message_transfer(cmd.destination, cmd.accept_mode, cmd.acquire_mode, m) elif cmd.destination == "abort": self.session.channel.connection.sock.close() elif cmd.destination == "heartbeat": self.session.channel.connection_heartbeat() else: self.queue.put(cmd) class ConnectionTest(TestCase): def setUp(self): self.queue = Queue() self.running = True started = Event() def run(): ts = TestServer(self.queue) for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()): conn = Connection(s, delegate=ts.connection) try: conn.start(5) except Closed: pass self.server = Thread(target=run) self.server.setDaemon(True) self.server.start() started.wait(3) assert started.isSet() def tearDown(self): self.running = False connect("127.0.0.1", PORT).close() self.server.join(3) def connect(self, **kwargs): return Connection(connect("127.0.0.1", PORT), **kwargs) def test(self): c = self.connect() c.start(10) ssn1 = c.session("test1", timeout=10) ssn2 = c.session("test2", timeout=10) assert ssn1 == c.sessions["test1"] assert ssn2 == c.sessions["test2"] assert ssn1.channel != None assert ssn2.channel != None assert ssn1 in c.attached.values() assert ssn2 in c.attached.values() ssn1.close(5) assert ssn1.channel == None assert ssn1 not in c.attached.values() assert ssn2 in c.sessions.values() ssn2.close(5) assert ssn2.channel == None assert ssn2 not in c.attached.values() assert ssn2 not in c.sessions.values() ssn = c.session("session", timeout=10) assert ssn.channel != None assert ssn in c.sessions.values() destinations = ("one", "two", "three") for d in destinations: ssn.message_transfer(d) for d in destinations: cmd = self.queue.get(10) assert cmd.destination == d assert cmd.headers == None assert cmd.payload == None msg = Message("this is a test") ssn.message_transfer("four", message=msg) cmd = self.queue.get(10) assert cmd.destination == "four" assert cmd.headers == None assert cmd.payload == msg.body qq = ssn.queue_query("asdf") assert qq.queue == "asdf" c.close(5) def testCloseGet(self): c = self.connect() c.start(10) ssn = c.session("test", timeout=10) echos = 
ssn.incoming("echo") for i in range(10): ssn.message_transfer("echo", message=Message("test%d" % i)) ssn.auto_sync=False ssn.message_transfer("abort") for i in range(10): m = echos.get(timeout=10) assert m.body == "test%d" % i try: m = echos.get(timeout=10) assert False except Closed, e: pass def testCloseListen(self): c = self.connect() c.start(10) ssn = c.session("test", timeout=10) echos = ssn.incoming("echo") messages = [] exceptions = [] condition = Condition() def listener(m): messages.append(m) def exc_listener(e): condition.acquire() exceptions.append(e) condition.notify() condition.release() echos.listen(listener, exc_listener) for i in range(10): ssn.message_transfer("echo", message=Message("test%d" % i)) ssn.auto_sync=False ssn.message_transfer("abort") condition.acquire() start = time.time() elapsed = 0 while not exceptions and elapsed < 10: condition.wait(10 - elapsed) elapsed = time.time() - start condition.release() for i in range(10): m = messages.pop(0) assert m.body == "test%d" % i assert len(exceptions) == 1 def testSync(self): c = self.connect() c.start(10) s = c.session("test") s.auto_sync = False s.message_transfer("echo", message=Message("test")) s.sync(10) def testHeartbeat(self): c = self.connect(heartbeat=10) c.start(10) s = c.session("test") s.channel.connection_heartbeat() s.message_transfer("heartbeat") qpid-python-0.22/python/qpid/tests/framing.py0000644000175000017500000002442111240310343017400 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
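#
# Illustrative sketch (not part of the original test suite): a frame-level
# encode/decode round trip following the same pattern as FrameTest.enc_dec
# below.  The payload string is made up for illustration; this helper is
# never invoked by the tests.
#
def _example_frame_roundtrip():
  from qpid.framing import Frame, FrameEncoder, FrameDecoder
  # Frame takes flags, type, track, channel and payload
  frame = Frame(0, 0, 0, 1, "example payload")
  enc = FrameEncoder()
  enc.write(frame)
  wire_bytes = enc.read()
  dec = FrameDecoder()
  dec.write(wire_bytes)
  decoded, = dec.read()
  assert decoded.payload == "example payload" and decoded.channel == 1
  return wire_bytes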
# # setup, usage, teardown, errors(sync), errors(async), stress, soak, # boundary-conditions, config from qpid.tests import Test from qpid.framing import * class Base(Test): def cmp_frames(self, frm1, frm2): assert frm1.flags == frm2.flags, "expected: %r, got %r" % (frm1, frm2) assert frm1.type == frm2.type, "expected: %r, got %r" % (frm1, frm2) assert frm1.track == frm2.track, "expected: %r, got %r" % (frm1, frm2) assert frm1.channel == frm2.channel, "expected: %r, got %r" % (frm1, frm2) assert frm1.payload == frm2.payload, "expected: %r, got %r" % (frm1, frm2) def cmp_segments(self, seg1, seg2): assert seg1.first == seg2.first, "expected: %r, got %r" % (seg1, seg2) assert seg1.last == seg2.last, "expected: %r, got %r" % (seg1, seg2) assert seg1.type == seg2.type, "expected: %r, got %r" % (seg1, seg2) assert seg1.track == seg2.track, "expected: %r, got %r" % (seg1, seg2) assert seg1.channel == seg2.channel, "expected: %r, got %r" % (seg1, seg2) assert seg1.payload == seg2.payload, "expected: %r, got %r" % (seg1, seg2) def cmp_list(self, l1, l2): if l1 is None: assert l2 is None return assert len(l1) == len(l2) for v1, v2 in zip(l1, l2): if isinstance(v1, Compound): self.cmp_ops(v1, v2) else: assert v1 == v2 def cmp_ops(self, op1, op2): if op1 is None: assert op2 is None return assert op1.__class__ == op2.__class__ cls = op1.__class__ assert op1.NAME == op2.NAME assert op1.CODE == op2.CODE assert op1.FIELDS == op2.FIELDS for f in cls.FIELDS: v1 = getattr(op1, f.name) v2 = getattr(op2, f.name) if COMPOUND.has_key(f.type) or f.type == "struct32": self.cmp_ops(v1, v2) elif f.type in ("list", "array"): self.cmp_list(v1, v2) else: assert v1 == v2, "expected: %r, got %r" % (v1, v2) if issubclass(cls, Command) or issubclass(cls, Control): assert op1.channel == op2.channel if issubclass(cls, Command): assert op1.sync == op2.sync, "expected: %r, got %r" % (op1.sync, op2.sync) assert (op1.headers is None and op2.headers is None) or \ (op1.headers is not None and op2.headers is not None) if op1.headers is not None: assert len(op1.headers) == len(op2.headers) for h1, h2 in zip(op1.headers, op2.headers): self.cmp_ops(h1, h2) class FrameTest(Base): def enc_dec(self, frames, encoded=None): enc = FrameEncoder() dec = FrameDecoder() enc.write(*frames) bytes = enc.read() if encoded is not None: assert bytes == encoded, "expected %r, got %r" % (encoded, bytes) dec.write(bytes) dframes = dec.read() assert len(frames) == len(dframes) for f, df, in zip(frames, dframes): self.cmp_frames(f, df) def testEmpty(self): self.enc_dec([Frame(0, 0, 0, 0, "")], "\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00") def testSingle(self): self.enc_dec([Frame(0, 0, 0, 1, "payload")], "\x00\x00\x00\x13\x00\x00\x00\x01\x00\x00\x00\x00payload") def testMaxChannel(self): self.enc_dec([Frame(0, 0, 0, 65535, "max-channel")], "\x00\x00\x00\x17\x00\x00\xff\xff\x00\x00\x00\x00max-channel") def testMaxType(self): self.enc_dec([Frame(0, 255, 0, 0, "max-type")], "\x00\xff\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00max-type") def testMaxTrack(self): self.enc_dec([Frame(0, 0, 15, 0, "max-track")], "\x00\x00\x00\x15\x00\x0f\x00\x00\x00\x00\x00\x00max-track") def testSequence(self): self.enc_dec([Frame(0, 0, 0, 0, "zero"), Frame(0, 0, 0, 1, "one"), Frame(0, 0, 1, 0, "two"), Frame(0, 0, 1, 1, "three"), Frame(0, 1, 0, 0, "four"), Frame(0, 1, 0, 1, "five"), Frame(0, 1, 1, 0, "six"), Frame(0, 1, 1, 1, "seven"), Frame(1, 0, 0, 0, "eight"), Frame(1, 0, 0, 1, "nine"), Frame(1, 0, 1, 0, "ten"), Frame(1, 0, 1, 1, "eleven"), Frame(1, 1, 0, 0, "twelve"), 
Frame(1, 1, 0, 1, "thirteen"), Frame(1, 1, 1, 0, "fourteen"), Frame(1, 1, 1, 1, "fifteen")]) class SegmentTest(Base): def enc_dec(self, segments, frames=None, interleave=None, max_payload=Frame.MAX_PAYLOAD): enc = SegmentEncoder(max_payload) dec = SegmentDecoder() enc.write(*segments) frms = enc.read() if frames is not None: assert len(frames) == len(frms), "expected %s, got %s" % (frames, frms) for f1, f2 in zip(frames, frms): self.cmp_frames(f1, f2) if interleave is not None: ilvd = [] for f in frms: ilvd.append(f) if interleave: ilvd.append(interleave.pop(0)) ilvd.extend(interleave) dec.write(*ilvd) else: dec.write(*frms) segs = dec.read() assert len(segments) == len(segs) for s1, s2 in zip(segments, segs): self.cmp_segments(s1, s2) def testEmpty(self): self.enc_dec([Segment(True, True, 0, 0, 0, "")], [Frame(FIRST_FRM | LAST_FRM | FIRST_SEG | LAST_SEG, 0, 0, 0, "")]) def testSingle(self): self.enc_dec([Segment(True, True, 0, 0, 0, "payload")], [Frame(FIRST_FRM | LAST_FRM | FIRST_SEG | LAST_SEG, 0, 0, 0, "payload")]) def testMaxChannel(self): self.enc_dec([Segment(False, False, 0, 0, 65535, "max-channel")], [Frame(FIRST_FRM | LAST_FRM, 0, 0, 65535, "max-channel")]) def testMaxType(self): self.enc_dec([Segment(False, False, 255, 0, 0, "max-type")], [Frame(FIRST_FRM | LAST_FRM, 255, 0, 0, "max-type")]) def testMaxTrack(self): self.enc_dec([Segment(False, False, 0, 15, 0, "max-track")], [Frame(FIRST_FRM | LAST_FRM, 0, 15, 0, "max-track")]) def testSequence(self): self.enc_dec([Segment(True, False, 0, 0, 0, "one"), Segment(False, False, 0, 0, 0, "two"), Segment(False, True, 0, 0, 0, "three")], [Frame(FIRST_FRM | LAST_FRM | FIRST_SEG, 0, 0, 0, "one"), Frame(FIRST_FRM | LAST_FRM, 0, 0, 0, "two"), Frame(FIRST_FRM | LAST_FRM | LAST_SEG, 0, 0, 0, "three")]) def testInterleaveChannel(self): frames = [Frame(0, 0, 0, 0, chr(ord("a") + i)) for i in range(7)] frames[0].flags |= FIRST_FRM frames[-1].flags |= LAST_FRM ilvd = [Frame(0, 0, 0, 1, chr(ord("a") + i)) for i in range(7)] self.enc_dec([Segment(False, False, 0, 0, 0, "abcdefg")], frames, ilvd, max_payload=1) def testInterleaveTrack(self): frames = [Frame(0, 0, 0, 0, "%c%c" % (ord("a") + i, ord("a") + i + 1)) for i in range(0, 8, 2)] frames[0].flags |= FIRST_FRM frames[-1].flags |= LAST_FRM ilvd = [Frame(0, 0, 1, 0, "%c%c" % (ord("a") + i, ord("a") + i + 1)) for i in range(0, 8, 2)] self.enc_dec([Segment(False, False, 0, 0, 0, "abcdefgh")], frames, ilvd, max_payload=2) from qpid.ops import * class OpTest(Base): def enc_dec(self, ops): enc = OpEncoder() dec = OpDecoder() enc.write(*ops) segs = enc.read() dec.write(*segs) dops = dec.read() assert len(ops) == len(dops) for op1, op2 in zip(ops, dops): self.cmp_ops(op1, op2) def testEmtpyMT(self): self.enc_dec([MessageTransfer()]) def testEmptyMTSync(self): self.enc_dec([MessageTransfer(sync=True)]) def testMT(self): self.enc_dec([MessageTransfer(destination="asdf")]) def testSyncMT(self): self.enc_dec([MessageTransfer(destination="asdf", sync=True)]) def testEmptyPayloadMT(self): self.enc_dec([MessageTransfer(payload="")]) def testPayloadMT(self): self.enc_dec([MessageTransfer(payload="test payload")]) def testHeadersEmptyPayloadMT(self): self.enc_dec([MessageTransfer(headers=[DeliveryProperties()])]) def testHeadersPayloadMT(self): self.enc_dec([MessageTransfer(headers=[DeliveryProperties()], payload="test payload")]) def testMultiHeadersEmptyPayloadMT(self): self.enc_dec([MessageTransfer(headers=[DeliveryProperties(), MessageProperties()])]) def testMultiHeadersPayloadMT(self): 
self.enc_dec([MessageTransfer(headers=[MessageProperties(), DeliveryProperties()], payload="test payload")]) def testContentTypeHeadersPayloadMT(self): self.enc_dec([MessageTransfer(headers=[MessageProperties(content_type="text/plain")], payload="test payload")]) def testMulti(self): self.enc_dec([MessageTransfer(), MessageTransfer(sync=True), MessageTransfer(destination="one"), MessageTransfer(destination="two", sync=True), MessageTransfer(destination="three", payload="test payload")]) def testControl(self): self.enc_dec([SessionAttach(name="asdf")]) def testMixed(self): self.enc_dec([SessionAttach(name="fdsa"), MessageTransfer(destination="test")]) def testChannel(self): self.enc_dec([SessionAttach(name="asdf", channel=3), MessageTransfer(destination="test", channel=1)]) def testCompound(self): self.enc_dec([MessageTransfer(headers=[MessageProperties(reply_to=ReplyTo(exchange="exch", routing_key="rk"))])]) def testListCompound(self): self.enc_dec([ExecutionResult(value=RecoverResult(in_doubt=[Xid(global_id="one"), Xid(global_id="two"), Xid(global_id="three")]))]) qpid-python-0.22/python/qpid/tests/codec010.py0000644000175000017500000001005111333034372017255 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
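#
# Illustrative sketch (not part of the original test suite): writing a single
# AMQP 0-10 primitive with StringCodec and reading it back, the same round
# trip CodecTest.check below performs.  The sample map is made up for
# illustration; this helper is never invoked by the tests.
#
def _example_primitive_roundtrip():
  from qpid.codec010 import StringCodec
  from qpid.ops import PRIMITIVE
  t = PRIMITIVE["map"]
  value = {"string": "example", "int": 3, "none": None}
  sc = StringCodec()
  sc.write_primitive(t, value)
  decoded = sc.read_primitive(t)
  assert decoded == value
  return decoded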
# import time from unittest import TestCase from qpid.codec010 import StringCodec from qpid.datatypes import timestamp, uuid4 from qpid.ops import PRIMITIVE class CodecTest(TestCase): def check(self, type, value, compare=True): t = PRIMITIVE[type] sc = StringCodec() sc.write_primitive(t, value) decoded = sc.read_primitive(t) if compare: assert decoded == value, "%s, %s" % (decoded, value) return decoded def testMapString(self): self.check("map", {"string": "this is a test"}) def testMapUnicode(self): self.check("map", {"unicode": u"this is a unicode test"}) def testMapBinary(self): self.check("map", {"binary": "\x7f\xb4R^\xe5\xf0:\x89\x96E1\xf6\xfe\xb9\x1b\xf5"}) def testMapBuffer(self): s = "\x7f\xb4R^\xe5\xf0:\x89\x96E1\xf6\xfe\xb9\x1b\xf5" dec = self.check("map", {"buffer": buffer(s)}, False) assert dec["buffer"] == s def testMapInt(self): self.check("map", {"int": 3}) def testMapLong(self): self.check("map", {"long": 2**32}) self.check("map", {"long": 1 << 34}) self.check("map", {"long": -(1 << 34)}) def testMapTimestamp(self): decoded = self.check("map", {"timestamp": timestamp(0)}) assert isinstance(decoded["timestamp"], timestamp) def testMapDatetime(self): decoded = self.check("map", {"datetime": timestamp(0).datetime()}, compare=False) assert isinstance(decoded["datetime"], timestamp) assert decoded["datetime"] == 0.0 def testMapNone(self): self.check("map", {"none": None}) def testMapNested(self): self.check("map", {"map": {"string": "nested test"}}) def testMapList(self): self.check("map", {"list": [1, "two", 3.0, -4]}) def testMapUUID(self): self.check("map", {"uuid": uuid4()}) def testMapAll(self): decoded = self.check("map", {"string": "this is a test", "unicode": u"this is a unicode test", "binary": "\x7f\xb4R^\xe5\xf0:\x89\x96E1\xf6\xfe\xb9\x1b\xf5", "int": 3, "long": 2**32, "timestamp": timestamp(0), "none": None, "map": {"string": "nested map"}, "list": [1, "two", 3.0, -4], "uuid": uuid4()}) assert isinstance(decoded["timestamp"], timestamp) def testMapEmpty(self): self.check("map", {}) def testMapNone(self): self.check("map", None) def testList(self): self.check("list", [1, "two", 3.0, -4]) def testListEmpty(self): self.check("list", []) def testListNone(self): self.check("list", None) def testArrayInt(self): self.check("array", [1, 2, 3, 4]) def testArrayString(self): self.check("array", ["one", "two", "three", "four"]) def testArrayEmpty(self): self.check("array", []) def testArrayNone(self): self.check("array", None) def testInt16(self): self.check("int16", 3) self.check("int16", -3) def testInt64(self): self.check("int64", 3) self.check("int64", -3) self.check("int64", 1<<34) self.check("int64", -(1<<34)) def testDatetime(self): self.check("datetime", timestamp(0)) self.check("datetime", timestamp(long(time.time()))) qpid-python-0.22/python/qpid/tests/queue.py0000644000175000017500000000360011333034372017105 0ustar mbamba# Do not delete - marks this directory as a python package. # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import threading, time from unittest import TestCase from qpid.queue import Queue, Empty, Closed class QueueTest (TestCase): # The qpid queue class just provides some simple extensions to # python's standard queue data structure, so we don't need to test # all the queue functionality. def test_listen(self): values = [] heard = threading.Event() def listener(x): values.append(x) heard.set() q = Queue(0) q.listen(listener) heard.clear() q.put(1) heard.wait() assert values[-1] == 1 heard.clear() q.put(2) heard.wait() assert values[-1] == 2 q.listen(None) q.put(3) assert q.get(3) == 3 q.listen(listener) heard.clear() q.put(4) heard.wait() assert values[-1] == 4 def test_close(self): q = Queue(0) q.put(1); q.put(2); q.put(3); q.close() assert q.get() == 1 assert q.get() == 2 assert q.get() == 3 for i in range(10): try: q.get() raise AssertionError("expected Closed") except Closed: pass qpid-python-0.22/python/qpid/tests/datatypes.py0000644000175000017500000001602711333034372017766 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
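#
# Illustrative sketch (not part of the original test suite): the wrap-around
# behaviour of serial numbers and basic RangedSet membership, which the
# SerialTest and RangedSetTest classes below exercise in detail.  This helper
# is never invoked by the tests.
#
def _example_datatypes():
  from qpid.datatypes import serial, RangedSet
  # serial numbers wrap around at 2**32
  assert serial(0xFFFFFFFFL) + 1 == serial(0)
  # a RangedSet tracks contiguous, inclusive ranges of ids
  rs = RangedSet()
  rs.add(1, 5)   # add the inclusive range 1..5
  rs.add(7)      # add a single id
  assert 3 in rs and 7 in rs and 6 not in rs
  return rs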
# from unittest import TestCase from qpid.datatypes import * from qpid.ops import DeliveryProperties, FragmentProperties, MessageProperties class SerialTest(TestCase): def test(self): for s in (serial(0), serial(0x8FFFFFFFL), serial(0xFFFFFFFFL)): assert s + 1 > s assert s - 1 < s assert s < s + 1 assert s > s - 1 assert serial(0xFFFFFFFFL) + 1 == serial(0) assert min(serial(0xFFFFFFFFL), serial(0x0)) == serial(0xFFFFFFFFL) assert max(serial(0xFFFFFFFFL), serial(0x0)) == serial(0x0) def testIncr(self): s = serial(0) s += 1 assert s == serial(1) def testIn(self): l = [serial(1), serial(2), serial(3), serial(4)] assert serial(1) in l assert serial(0xFFFFFFFFL + 2) in l assert 4 in l def testNone(self): assert serial(0) != None def testHash(self): d = {} d[serial(0)] = "zero" assert d[0] == "zero" def testAdd(self): assert serial(2) + 2 == serial(4) assert serial(2) + 2 == 4 def testSub(self): delta = serial(4) - serial(2) assert isinstance(delta, int) or isinstance(delta, long) assert delta == 2 delta = serial(4) - 2 assert isinstance(delta, Serial) assert delta == serial(2) class RangedSetTest(TestCase): def check(self, ranges): posts = [] for range in ranges: posts.append(range.lower) posts.append(range.upper) sorted = posts[:] sorted.sort() assert posts == sorted idx = 1 while idx + 1 < len(posts): assert posts[idx] + 1 != posts[idx+1] idx += 2 def test(self): rs = RangedSet() self.check(rs.ranges) rs.add(1) assert 1 in rs assert 2 not in rs assert 0 not in rs self.check(rs.ranges) rs.add(2) assert 0 not in rs assert 1 in rs assert 2 in rs assert 3 not in rs self.check(rs.ranges) rs.add(0) assert -1 not in rs assert 0 in rs assert 1 in rs assert 2 in rs assert 3 not in rs self.check(rs.ranges) rs.add(37) assert -1 not in rs assert 0 in rs assert 1 in rs assert 2 in rs assert 3 not in rs assert 36 not in rs assert 37 in rs assert 38 not in rs self.check(rs.ranges) rs.add(-1) self.check(rs.ranges) rs.add(-3) self.check(rs.ranges) rs.add(1, 20) assert 21 not in rs assert 20 in rs self.check(rs.ranges) def testAddSelf(self): a = RangedSet() a.add(0, 8) self.check(a.ranges) a.add(0, 8) self.check(a.ranges) assert len(a.ranges) == 1 range = a.ranges[0] assert range.lower == 0 assert range.upper == 8 def testEmpty(self): s = RangedSet() assert s.empty() s.add(0, -1) assert s.empty() s.add(0, 0) assert not s.empty() def testMinMax(self): s = RangedSet() assert s.max() is None assert s.min() is None s.add(0, 10) assert s.max() == 10 assert s.min() == 0 s.add(0, 5) assert s.max() == 10 assert s.min() == 0 s.add(0, 11) assert s.max() == 11 assert s.min() == 0 s.add(15, 20) assert s.max() == 20 assert s.min() == 0 s.add(-10, -5) assert s.max() == 20 assert s.min() == -10 class RangeTest(TestCase): def testIntersect1(self): a = Range(0, 10) b = Range(9, 20) i1 = a.intersect(b) i2 = b.intersect(a) assert i1.upper == 10 assert i2.upper == 10 assert i1.lower == 9 assert i2.lower == 9 def testIntersect2(self): a = Range(0, 10) b = Range(11, 20) assert a.intersect(b) == None assert b.intersect(a) == None def testIntersect3(self): a = Range(0, 10) b = Range(3, 5) i1 = a.intersect(b) i2 = b.intersect(a) assert i1.upper == 5 assert i2.upper == 5 assert i1.lower == 3 assert i2.lower == 3 class UUIDTest(TestCase): def test(self): # this test is kind of lame, but it does exercise the basic # functionality of the class u = uuid4() for i in xrange(1024): assert u != uuid4() class MessageTest(TestCase): def setUp(self): self.mp = MessageProperties() self.dp = DeliveryProperties() self.fp = FragmentProperties() def 
testHas(self): m = Message(self.mp, self.dp, self.fp, "body") assert m.has("message_properties") assert m.has("delivery_properties") assert m.has("fragment_properties") def testGet(self): m = Message(self.mp, self.dp, self.fp, "body") assert m.get("message_properties") == self.mp assert m.get("delivery_properties") == self.dp assert m.get("fragment_properties") == self.fp def testSet(self): m = Message(self.mp, self.dp, "body") assert m.get("fragment_properties") is None m.set(self.fp) assert m.get("fragment_properties") == self.fp def testSetOnEmpty(self): m = Message("body") assert m.get("delivery_properties") is None m.set(self.dp) assert m.get("delivery_properties") == self.dp def testSetReplace(self): m = Message(self.mp, self.dp, self.fp, "body") dp = DeliveryProperties() assert m.get("delivery_properties") == self.dp assert m.get("delivery_properties") != dp m.set(dp) assert m.get("delivery_properties") != self.dp assert m.get("delivery_properties") == dp def testClear(self): m = Message(self.mp, self.dp, self.fp, "body") assert m.get("message_properties") == self.mp assert m.get("delivery_properties") == self.dp assert m.get("fragment_properties") == self.fp m.clear("fragment_properties") assert m.get("fragment_properties") is None assert m.get("message_properties") == self.mp assert m.get("delivery_properties") == self.dp class TimestampTest(TestCase): def check(self, expected, *values): for v in values: assert isinstance(v, timestamp) assert v == expected assert v == timestamp(expected) def testAdd(self): self.check(4.0, timestamp(2.0) + 2.0, 2.0 + timestamp(2.0)) def testSub(self): self.check(2.0, timestamp(4.0) - 2.0, 4.0 - timestamp(2.0)) def testNeg(self): self.check(-4.0, -timestamp(4.0)) def testPos(self): self.check(+4.0, +timestamp(4.0)) def testAbs(self): self.check(4.0, abs(timestamp(-4.0))) def testConversion(self): dt = timestamp(0).datetime() t = timestamp(dt) assert t == 0 qpid-python-0.22/python/qpid/tests/codec.py0000644000175000017500000004766111336167172017055 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest from qpid.codec import Codec from qpid.spec08 import load from cStringIO import StringIO from qpid.reference import ReferenceId __doc__ = """ This is a unit test script for qpid/codec.py It can be run standalone or as part of the existing test framework. To run standalone: ------------------- Place in the qpid/python/tests/ directory and type... python codec.py A brief output will be printed on screen. The verbose output will be placed in a file called codec_unit_test_output.txt. 
[TODO: make this filename configurable] To run as part of the existing test framework: ----------------------------------------------- python run-tests tests.codec Change History: ----------------- Jimmy John 05/19/2007 Initial draft Jimmy John 05/22/2007 Implemented comments by Rafael Schloming """ from qpid.specs_config import amqp_spec_0_8 SPEC = load(amqp_spec_0_8) # -------------------------------------- # -------------------------------------- class BaseDataTypes(unittest.TestCase): """ Base class containing common functions """ # --------------- def setUp(self): """ standard setUp for unittest (refer unittest documentation for details) """ self.codec = Codec(StringIO(), SPEC) # ------------------ def tearDown(self): """ standard tearDown for unittest (refer unittest documentation for details) """ self.codec.stream.flush() self.codec.stream.close() # ---------------------------------------- def callFunc(self, functionName, *args): """ helper function - given a function name and arguments, calls the function with the args and returns the contents of the stream """ getattr(self.codec, functionName)(args[0]) return self.codec.stream.getvalue() # ---------------------------------------- def readFunc(self, functionName, *args): """ helper function - creates an input stream and then calls the function with arguments as have been supplied """ self.codec.stream = StringIO(args[0]) return getattr(self.codec, functionName)() # ---------------------------------------- # ---------------------------------------- class IntegerTestCase(BaseDataTypes): """ Handles octet, short, long, long long """ # ------------------------- def __init__(self, *args): """ sets constants for use in tests """ BaseDataTypes.__init__(self, *args) self.const_integer = 2 self.const_integer_octet_encoded = '\x02' self.const_integer_short_encoded = '\x00\x02' self.const_integer_long_encoded = '\x00\x00\x00\x02' self.const_integer_long_long_encoded = '\x00\x00\x00\x00\x00\x00\x00\x02' # -------------------------- # # Unsigned Octet - 8 bits # # -------------------------- # # -------------------------- def test_unsigned_octet(self): """ ubyte format requires 0<=number<=255 """ self.failUnlessEqual(self.callFunc('encode_octet', self.const_integer), self.const_integer_octet_encoded, 'octet encoding FAILED...') # ------------------------------------------- def test_octet_out_of_upper_range(self): """ testing for input above acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_octet, 256) # ------------------------------------------- def test_uoctet_out_of_lower_range(self): """ testing for input below acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_octet, -1) # --------------------------------- def test_uoctet_with_fraction(self): """ the fractional part should be ignored... 
""" self.failUnlessEqual(self.callFunc('encode_octet', 2.5), self.const_integer_octet_encoded, 'octet encoding FAILED with fractions...') # ------------------------------------ def test_unsigned_octet_decode(self): """ octet decoding """ self.failUnlessEqual(self.readFunc('decode_octet', self.const_integer_octet_encoded), self.const_integer, 'octet decoding FAILED...') # ----------------------------------- # # Unsigned Short Integers - 16 bits # # ----------------------------------- # # ----------------------- def test_ushort_int(self): """ testing unsigned short integer """ self.failUnlessEqual(self.callFunc('encode_short', self.const_integer), self.const_integer_short_encoded, 'short encoding FAILED...') # ------------------------------------------- def test_ushort_int_out_of_upper_range(self): """ testing for input above acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_short, 65536) # ------------------------------------------- def test_ushort_int_out_of_lower_range(self): """ testing for input below acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_short, -1) # --------------------------------- def test_ushort_int_with_fraction(self): """ the fractional part should be ignored... """ self.failUnlessEqual(self.callFunc('encode_short', 2.5), self.const_integer_short_encoded, 'short encoding FAILED with fractions...') # ------------------------------------ def test_ushort_int_decode(self): """ unsigned short decoding """ self.failUnlessEqual(self.readFunc('decode_short', self.const_integer_short_encoded), self.const_integer, 'unsigned short decoding FAILED...') # ---------------------------------- # # Unsigned Long Integers - 32 bits # # ---------------------------------- # # ----------------------- def test_ulong_int(self): """ testing unsigned long integer """ self.failUnlessEqual(self.callFunc('encode_long', self.const_integer), self.const_integer_long_encoded, 'long encoding FAILED...') # ------------------------------------------- def test_ulong_int_out_of_upper_range(self): """ testing for input above acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_long, 4294967296) # ------------------------------------------- def test_ulong_int_out_of_lower_range(self): """ testing for input below acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_long, -1) # --------------------------------- def test_ulong_int_with_fraction(self): """ the fractional part should be ignored... 
""" self.failUnlessEqual(self.callFunc('encode_long', 2.5), self.const_integer_long_encoded, 'long encoding FAILED with fractions...') # ------------------------------- def test_ulong_int_decode(self): """ unsigned long decoding """ self.failUnlessEqual(self.readFunc('decode_long', self.const_integer_long_encoded), self.const_integer, 'unsigned long decoding FAILED...') # --------------------------------------- # # Unsigned Long Long Integers - 64 bits # # --------------------------------------- # # ----------------------- def test_ulong_long_int(self): """ testing unsigned long long integer """ self.failUnlessEqual(self.callFunc('encode_longlong', self.const_integer), self.const_integer_long_long_encoded, 'long long encoding FAILED...') # ------------------------------------------- def test_ulong_long_int_out_of_upper_range(self): """ testing for input above acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_longlong, 18446744073709551616) # ------------------------------------------- def test_ulong_long_int_out_of_lower_range(self): """ testing for input below acceptable range """ self.failUnlessRaises(Exception, self.codec.encode_longlong, -1) # --------------------------------- def test_ulong_long_int_with_fraction(self): """ the fractional part should be ignored... """ self.failUnlessEqual(self.callFunc('encode_longlong', 2.5), self.const_integer_long_long_encoded, 'long long encoding FAILED with fractions...') # ------------------------------------ def test_ulong_long_int_decode(self): """ unsigned long long decoding """ self.failUnlessEqual(self.readFunc('decode_longlong', self.const_integer_long_long_encoded), self.const_integer, 'unsigned long long decoding FAILED...') # ----------------------------------- # ----------------------------------- class BitTestCase(BaseDataTypes): """ Handles bits """ # ---------------------------------------------- def callFunc(self, functionName, *args): """ helper function """ for ele in args: getattr(self.codec, functionName)(ele) self.codec.flush() return self.codec.stream.getvalue() # ------------------- def test_bit1(self): """ sends in 11 """ self.failUnlessEqual(self.callFunc('encode_bit', 1, 1), '\x03', '11 bit encoding FAILED...') # ------------------- def test_bit2(self): """ sends in 10011 """ self.failUnlessEqual(self.callFunc('encode_bit', 1, 1, 0, 0, 1), '\x13', '10011 bit encoding FAILED...') # ------------------- def test_bit3(self): """ sends in 1110100111 [10 bits(right to left), should be compressed into two octets] """ self.failUnlessEqual(self.callFunc('encode_bit', 1,1,1,0,0,1,0,1,1,1), '\xa7\x03', '1110100111(right to left) bit encoding FAILED...') # ------------------------------------ def test_bit_decode_1(self): """ decode bit 1 """ self.failUnlessEqual(self.readFunc('decode_bit', '\x01'), 1, 'decode bit 1 FAILED...') # ------------------------------------ def test_bit_decode_0(self): """ decode bit 0 """ self.failUnlessEqual(self.readFunc('decode_bit', '\x00'), 0, 'decode bit 0 FAILED...') # ----------------------------------- # ----------------------------------- class StringTestCase(BaseDataTypes): """ Handles short strings, long strings """ # ------------------------------------------------------------- # # Short Strings - 8 bit length followed by zero or more octets # # ------------------------------------------------------------- # # --------------------------------------- def test_short_string_zero_length(self): """ 0 length short string """ self.failUnlessEqual(self.callFunc('encode_shortstr', 
''), '\x00', '0 length short string encoding FAILED...') # ------------------------------------------- def test_short_string_positive_length(self): """ positive length short string """ self.failUnlessEqual(self.callFunc('encode_shortstr', 'hello world'), '\x0bhello world', 'positive length short string encoding FAILED...') # ------------------------------------------- def test_short_string_out_of_upper_range(self): """ string length > 255 """ self.failUnlessRaises(Exception, self.codec.encode_shortstr, 'x'*256) # ------------------------------------ def test_short_string_decode(self): """ short string decode """ self.failUnlessEqual(self.readFunc('decode_shortstr', '\x0bhello world'), 'hello world', 'short string decode FAILED...') # ------------------------------------------------------------- # # Long Strings - 32 bit length followed by zero or more octets # # ------------------------------------------------------------- # # --------------------------------------- def test_long_string_zero_length(self): """ 0 length long string """ self.failUnlessEqual(self.callFunc('encode_longstr', ''), '\x00\x00\x00\x00', '0 length long string encoding FAILED...') # ------------------------------------------- def test_long_string_positive_length(self): """ positive length long string """ self.failUnlessEqual(self.callFunc('encode_longstr', 'hello world'), '\x00\x00\x00\x0bhello world', 'positive length long string encoding FAILED...') # ------------------------------------ def test_long_string_decode(self): """ long string decode """ self.failUnlessEqual(self.readFunc('decode_longstr', '\x00\x00\x00\x0bhello world'), 'hello world', 'long string decode FAILED...') # -------------------------------------- # -------------------------------------- class TimestampTestCase(BaseDataTypes): """ No need of any test cases here as timestamps are implemented as long long which is tested above """ pass # --------------------------------------- # --------------------------------------- class FieldTableTestCase(BaseDataTypes): """ Handles Field Tables Only S/I type messages seem to be implemented currently """ # ------------------------- def __init__(self, *args): """ sets constants for use in tests """ BaseDataTypes.__init__(self, *args) self.const_field_table_dummy_dict = {'$key1':'value1','$key2':'value2'} self.const_field_table_dummy_dict_encoded = '\x00\x00\x00\x22\x05$key2S\x00\x00\x00\x06value2\x05$key1S\x00\x00\x00\x06value1' # ------------------------------------------- def test_field_table_name_value_pair(self): """ valid name value pair """ self.failUnlessEqual(self.callFunc('encode_table', {'$key1':'value1'}), '\x00\x00\x00\x11\x05$key1S\x00\x00\x00\x06value1', 'valid name value pair encoding FAILED...') # --------------------------------------------------- def test_field_table_multiple_name_value_pair(self): """ multiple name value pair """ self.failUnlessEqual(self.callFunc('encode_table', self.const_field_table_dummy_dict), self.const_field_table_dummy_dict_encoded, 'multiple name value pair encoding FAILED...') # ------------------------------------ def test_field_table_decode(self): """ field table decode """ self.failUnlessEqual(self.readFunc('decode_table', self.const_field_table_dummy_dict_encoded), self.const_field_table_dummy_dict, 'field table decode FAILED...') # ------------------------------------ # ------------------------------------ class ContentTestCase(BaseDataTypes): """ Handles Content data types """ # ----------------------------- def test_content_inline(self): """ inline content 
""" self.failUnlessEqual(self.callFunc('encode_content', 'hello inline message'), '\x00\x00\x00\x00\x14hello inline message', 'inline content encoding FAILED...') # -------------------------------- def test_content_reference(self): """ reference content """ self.failUnlessEqual(self.callFunc('encode_content', ReferenceId('dummyId')), '\x01\x00\x00\x00\x07dummyId', 'reference content encoding FAILED...') # ------------------------------------ def test_content_inline_decode(self): """ inline content decode """ self.failUnlessEqual(self.readFunc('decode_content', '\x00\x00\x00\x00\x14hello inline message'), 'hello inline message', 'inline content decode FAILED...') # ------------------------------------ def test_content_reference_decode(self): """ reference content decode """ self.failUnlessEqual(self.readFunc('decode_content', '\x01\x00\x00\x00\x07dummyId').id, 'dummyId', 'reference content decode FAILED...') # ------------------------ # # Pre - existing test code # # ------------------------ # # --------------------- def test(type, value): """ old test function cut/copy/paste from qpid/codec.py """ if isinstance(value, (list, tuple)): values = value else: values = [value] stream = StringIO() codec = Codec(stream, SPEC) for v in values: codec.encode(type, v) codec.flush() enc = stream.getvalue() stream.reset() dup = [] for i in xrange(len(values)): dup.append(codec.decode(type)) if values != dup: raise AssertionError("%r --> %r --> %r" % (values, enc, dup)) # ----------------------- def dotest(type, value): """ old test function cut/copy/paste from qpid/codec.py """ args = (type, value) test(*args) # ------------- def oldtests(): """ old test function cut/copy/paste from qpid/codec.py """ for value in ("1", "0", "110", "011", "11001", "10101", "10011"): for i in range(10): dotest("bit", map(lambda x: x == "1", value*i)) for value in ({}, {"asdf": "fdsa", "fdsa": 1, "three": 3}, {"one": 1}): dotest("table", value) for type in ("octet", "short", "long", "longlong"): for value in range(0, 256): dotest(type, value) for type in ("shortstr", "longstr"): for value in ("", "a", "asdf"): dotest(type, value) # ----------------------------------------- class oldTests(unittest.TestCase): """ class to handle pre-existing test cases """ # --------------------------- def test_oldtestcases(self): """ call the old tests """ return oldtests() # --------------------------- # --------------------------- if __name__ == '__main__': codec_test_suite = unittest.TestSuite() #adding all the test suites... codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(IntegerTestCase)) codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(BitTestCase)) codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(StringTestCase)) codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TimestampTestCase)) codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FieldTableTestCase)) codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ContentTestCase)) #loading pre-existing test case from qpid/codec.py codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(oldTests)) run_output_stream = StringIO() test_runner = unittest.TextTestRunner(run_output_stream, '', '') test_result = test_runner.run(codec_test_suite) print '\n%d test run...' 
% (test_result.testsRun) if test_result.wasSuccessful(): print '\nAll tests successful\n' if test_result.failures: print '\n----------' print '%d FAILURES:' % (len(test_result.failures)) print '----------\n' for failure in test_result.failures: print str(failure[0]) + ' ... FAIL' if test_result.errors: print '\n---------' print '%d ERRORS:' % (len(test_result.errors)) print '---------\n' for error in test_result.errors: print str(error[0]) + ' ... ERROR' f = open('codec_unit_test_output.txt', 'w') f.write(str(run_output_stream.getvalue())) f.close() qpid-python-0.22/python/qpid/parser.py0000644000175000017500000000372211307763616016133 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # class ParseError(Exception): def __init__(self, token, *expected): line, ln, col = token.line_info() exp = ", ".join(map(str, expected)) if len(expected) > 1: exp = "(%s)" % exp if expected: msg = "expecting %s, got %s line:%s,%s:%s" % (exp, token, ln, col, line) else: msg = "unexpected token %s line:%s,%s:%s" % (token, ln, col, line) Exception.__init__(self, msg) self.token = token self.expected = expected class Parser: def __init__(self, tokens): self.tokens = tokens self.idx = 0 def next(self): return self.tokens[self.idx] def matches(self, *types): return self.next().type in types def eat(self, *types): if types and not self.matches(*types): raise ParseError(self.next(), *types) else: t = self.next() self.idx += 1 return t def eat_until(self, *types): result = [] while not self.matches(*types): result.append(self.eat()) return result def remove(self, start, end): start_idx = self.tokens.index(start) end_idx = self.tokens.index(end) + 1 del self.tokens[start_idx:end_idx] self.idx -= end_idx - start_idx def reset(self): self.idx = 0 qpid-python-0.22/python/qpid/connection.py0000644000175000017500000001552412004253175016766 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
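# Usage sketch for the Connection class defined in this module (illustrative
# only; assumes a broker listening on the conventional AMQP port 5672, and the
# session name and timeouts are arbitrary examples):
#
#   import socket
#   from qpid.connection import Connection
#
#   sock = socket.socket()
#   sock.connect(("localhost", 5672))   # assumed broker address
#   conn = Connection(sock)             # extra keyword args are forwarded to
#                                       # the client delegate
#   conn.start(timeout=10)              # runs the connection negotiation
#   ssn = conn.session("example")       # attaches a named session
#   # ... issue commands through ssn ...
#   conn.close(timeout=10)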
# import datatypes, session from threading import Thread, Condition, RLock from util import wait, notify from codec010 import StringCodec from framing import * from session import Session from generator import control_invoker from exceptions import * from logging import getLogger import delegates, socket import sys class ChannelBusy(Exception): pass class ChannelsBusy(Exception): pass class SessionBusy(Exception): pass class ConnectionFailed(Exception): pass def client(*args, **kwargs): return delegates.Client(*args, **kwargs) def server(*args, **kwargs): return delegates.Server(*args, **kwargs) from framer import Framer class Connection(Framer): def __init__(self, sock, delegate=client, **args): Framer.__init__(self, sock) self.lock = RLock() self.attached = {} self.sessions = {} self.condition = Condition() # XXX: we should combine this into a single comprehensive state # model (whatever that means) self.opened = False self.failed = False self.closed = False self.close_code = (None, "connection aborted") self.thread = Thread(target=self.run) self.thread.setDaemon(True) self.channel_max = 65535 self.user_id = None self.op_enc = OpEncoder() self.seg_enc = SegmentEncoder() self.frame_enc = FrameEncoder() self.delegate = delegate(self, **args) def attach(self, name, ch, delegate, force=False): self.lock.acquire() try: ssn = self.attached.get(ch.id) if ssn is not None: if ssn.name != name: raise ChannelBusy(ch, ssn) else: ssn = self.sessions.get(name) if ssn is None: ssn = Session(name, delegate=delegate) self.sessions[name] = ssn elif ssn.channel is not None: if force: del self.attached[ssn.channel.id] ssn.channel = None else: raise SessionBusy(ssn) self.attached[ch.id] = ssn ssn.channel = ch ch.session = ssn return ssn finally: self.lock.release() def detach(self, name, ch): self.lock.acquire() try: self.attached.pop(ch.id, None) ssn = self.sessions.pop(name, None) if ssn is not None: ssn.channel = None ssn.closed() return ssn finally: self.lock.release() def __channel(self): for i in xrange(1, self.channel_max): if not self.attached.has_key(i): return i else: raise ChannelsBusy() def session(self, name, timeout=None, delegate=session.client): self.lock.acquire() try: ch = Channel(self, self.__channel()) ssn = self.attach(name, ch, delegate) ssn.channel.session_attach(name) if wait(ssn.condition, lambda: ssn.channel is not None, timeout): return ssn else: self.detach(name, ch) raise Timeout() finally: self.lock.release() def detach_all(self): self.lock.acquire() self.failed = True try: for ssn in self.attached.values(): if self.close_code[0] != 200: ssn.exceptions.append(self.close_code) self.detach(ssn.name, ssn.channel) finally: self.lock.release() def start(self, timeout=None): self.delegate.start() self.thread.start() if not wait(self.condition, lambda: self.opened or self.failed, timeout): self.thread.join() raise Timeout() if self.failed: self.thread.join() raise ConnectionFailed(*self.close_code) def run(self): frame_dec = FrameDecoder() seg_dec = SegmentDecoder() op_dec = OpDecoder() while not self.closed: try: data = self.sock.recv(64*1024) if not data: self.detach_all() break # If we have a security layer and it sends us no decoded data, # that's OK as long as its return code is happy. if self.security_layer_rx: try: data = self.security_layer_rx.decode(data) except: self.detach_all() break # When we do not use SSL transport, we get periodic # spurious timeout events on the socket. When using SSL, # these events show up as timeout *errors*. 
Both should be # ignored unless we have aborted. except socket.timeout: if self.aborted(): self.close_code = (None, "connection timed out") self.detach_all() break else: continue except socket.error, e: if self.aborted() or str(e) != "The read operation timed out": self.close_code = (None, str(e)) self.detach_all() break else: continue frame_dec.write(data) seg_dec.write(*frame_dec.read()) op_dec.write(*seg_dec.read()) for op in op_dec.read(): try: self.delegate.received(op) except Closed, e: self.close_code = (None, str(e)) if not self.opened: self.failed = True self.closed = True notify(self.condition) self.sock.close() def write_op(self, op): self.sock_lock.acquire() try: self.op_enc.write(op) self.seg_enc.write(*self.op_enc.read()) self.frame_enc.write(*self.seg_enc.read()) bytes = self.frame_enc.read() self.write(bytes) self.flush() finally: self.sock_lock.release() def close(self, timeout=None): if not self.opened: return Channel(self, 0).connection_close(200) if not wait(self.condition, lambda: not self.opened, timeout): raise Timeout() self.thread.join(timeout=timeout) def __str__(self): return "%s:%s" % self.sock.getsockname() def __repr__(self): return str(self) log = getLogger("qpid.io.ctl") class Channel(control_invoker()): def __init__(self, connection, id): self.connection = connection self.id = id self.session = None def invoke(self, op, args, kwargs): ctl = op(*args, **kwargs) ctl.channel = self.id self.connection.write_op(ctl) log.debug("SENT %s", ctl) def __str__(self): return "%s[%s]" % (self.connection, self.id) def __repr__(self): return str(self) qpid-python-0.22/python/qpid/framing.py0000644000175000017500000002036411326041427016251 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
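# Usage sketch for the frame codec defined in this module (illustrative; the
# type/track/channel values below are arbitrary integers, and the flag
# constants come from the definitions that follow):
#
#   from qpid.framing import Frame, FrameEncoder, FrameDecoder, \
#       FIRST_SEG, LAST_SEG, FIRST_FRM, LAST_FRM
#
#   frm = Frame(FIRST_SEG | LAST_SEG | FIRST_FRM | LAST_FRM,
#               type=1, track=0, channel=0, payload="\x00\x01")
#   enc = FrameEncoder()
#   enc.write(frm)
#   wire = enc.read()          # network-ready byte string
#
#   dec = FrameDecoder()
#   dec.write(wire)            # input may arrive in arbitrary chunks
#   frames = dec.read()        # -> [Frame(...)] once a whole frame is buffered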
# import struct FIRST_SEG = 0x08 LAST_SEG = 0x04 FIRST_FRM = 0x02 LAST_FRM = 0x01 class Frame: HEADER = "!2BHxBH4x" HEADER_SIZE = struct.calcsize(HEADER) MAX_PAYLOAD = 65535 - struct.calcsize(HEADER) def __init__(self, flags, type, track, channel, payload): if len(payload) > Frame.MAX_PAYLOAD: raise ValueError("max payload size exceeded: %s" % len(payload)) self.flags = flags self.type = type self.track = track self.channel = channel self.payload = payload def isFirstSegment(self): return bool(FIRST_SEG & self.flags) def isLastSegment(self): return bool(LAST_SEG & self.flags) def isFirstFrame(self): return bool(FIRST_FRM & self.flags) def isLastFrame(self): return bool(LAST_FRM & self.flags) def __repr__(self): return "%s%s%s%s %s %s %s %r" % (int(self.isFirstSegment()), int(self.isLastSegment()), int(self.isFirstFrame()), int(self.isLastFrame()), self.type, self.track, self.channel, self.payload) class Segment: def __init__(self, first, last, type, track, channel, payload): self.id = None self.offset = None self.first = first self.last = last self.type = type self.track = track self.channel = channel self.payload = payload def __repr__(self): return "%s%s %s %s %s %r" % (int(self.first), int(self.last), self.type, self.track, self.channel, self.payload) class FrameDecoder: def __init__(self): self.input = "" self.output = [] self.parse = self.__frame_header def write(self, bytes): self.input += bytes while True: next = self.parse() if next is None: break else: self.parse = next def __consume(self, n): result = self.input[:n] self.input = self.input[n:] return result def __frame_header(self): if len(self.input) >= Frame.HEADER_SIZE: st = self.__consume(Frame.HEADER_SIZE) self.flags, self.type, self.size, self.track, self.channel = \ struct.unpack(Frame.HEADER, st) return self.__frame_body def __frame_body(self): size = self.size - Frame.HEADER_SIZE if len(self.input) >= size: payload = self.__consume(size) frame = Frame(self.flags, self.type, self.track, self.channel, payload) self.output.append(frame) return self.__frame_header def read(self): result = self.output self.output = [] return result class FrameEncoder: def __init__(self): self.output = "" def write(self, *frames): for frame in frames: size = len(frame.payload) + Frame.HEADER_SIZE track = frame.track & 0x0F self.output += struct.pack(Frame.HEADER, frame.flags, frame.type, size, track, frame.channel) self.output += frame.payload def read(self): result = self.output self.output = "" return result class SegmentDecoder: def __init__(self): self.fragments = {} self.segments = [] def write(self, *frames): for frm in frames: key = (frm.channel, frm.track) seg = self.fragments.get(key) if seg == None: seg = Segment(frm.isFirstSegment(), frm.isLastSegment(), frm.type, frm.track, frm.channel, "") self.fragments[key] = seg seg.payload += frm.payload if frm.isLastFrame(): self.fragments.pop(key) self.segments.append(seg) def read(self): result = self.segments self.segments = [] return result class SegmentEncoder: def __init__(self, max_payload=Frame.MAX_PAYLOAD): self.max_payload = max_payload self.frames = [] def write(self, *segments): for seg in segments: remaining = seg.payload first = True while first or remaining: payload = remaining[:self.max_payload] remaining = remaining[self.max_payload:] flags = 0 if first: flags |= FIRST_FRM first = False if not remaining: flags |= LAST_FRM if seg.first: flags |= FIRST_SEG if seg.last: flags |= LAST_SEG frm = Frame(flags, seg.type, seg.track, seg.channel, payload) self.frames.append(frm) def 
read(self): result = self.frames self.frames = [] return result from ops import COMMANDS, CONTROLS, COMPOUND, Header, segment_type, track from codec010 import StringCodec class OpEncoder: def __init__(self): self.segments = [] def write(self, *ops): for op in ops: if COMMANDS.has_key(op.NAME): seg_type = segment_type.command seg_track = track.command enc = self.encode_command(op) elif CONTROLS.has_key(op.NAME): seg_type = segment_type.control seg_track = track.control enc = self.encode_compound(op) else: raise ValueError(op) seg = Segment(True, False, seg_type, seg_track, op.channel, enc) self.segments.append(seg) if hasattr(op, "headers") and op.headers is not None: hdrs = "" for h in op.headers: hdrs += self.encode_compound(h) seg = Segment(False, False, segment_type.header, seg_track, op.channel, hdrs) self.segments.append(seg) if hasattr(op, "payload") and op.payload is not None: self.segments.append(Segment(False, False, segment_type.body, seg_track, op.channel, op.payload)) self.segments[-1].last = True def encode_command(self, cmd): sc = StringCodec() sc.write_uint16(cmd.CODE) sc.write_compound(Header(sync=cmd.sync)) sc.write_fields(cmd) return sc.encoded def encode_compound(self, op): sc = StringCodec() sc.write_compound(op) return sc.encoded def read(self): result = self.segments self.segments = [] return result class OpDecoder: def __init__(self): self.op = None self.ops = [] def write(self, *segments): for seg in segments: if seg.first: if seg.type == segment_type.command: self.op = self.decode_command(seg.payload) elif seg.type == segment_type.control: self.op = self.decode_control(seg.payload) else: raise ValueError(seg) self.op.channel = seg.channel elif seg.type == segment_type.header: if self.op.headers is None: self.op.headers = [] self.op.headers.extend(self.decode_headers(seg.payload)) elif seg.type == segment_type.body: if self.op.payload is None: self.op.payload = seg.payload else: self.op.payload += seg.payload if seg.last: self.ops.append(self.op) self.op = None def decode_command(self, encoded): sc = StringCodec(encoded) code = sc.read_uint16() cls = COMMANDS[code] hdr = sc.read_compound(Header) cmd = cls() sc.read_fields(cmd) cmd.sync = hdr.sync return cmd def decode_control(self, encoded): sc = StringCodec(encoded) code = sc.read_uint16() cls = CONTROLS[code] ctl = cls() sc.read_fields(ctl) return ctl def decode_headers(self, encoded): sc = StringCodec(encoded) result = [] while sc.encoded: result.append(sc.read_struct32()) return result def read(self): result = self.ops self.ops = [] return result qpid-python-0.22/python/qpid/codec010.py0000644000175000017500000002423511535457532016137 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
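# Usage sketch (illustrative; relies only on the StringCodec helper defined at
# the bottom of this module, the values are made-up examples):
#
#   from qpid.codec010 import StringCodec
#
#   sc = StringCodec()
#   sc.write_uint16(10)
#   sc.write_str8(u"queue-name")
#   sc.write_map({"durable": True, "depth": 42})
#   wire = sc.encoded                  # accumulated byte string
#
#   rc = StringCodec(wire)
#   assert rc.read_uint16() == 10
#   assert rc.read_str8() == u"queue-name"
#   assert rc.read_map() == {"durable": True, "depth": 42}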
# import datetime, string from packer import Packer from datatypes import serial, timestamp, RangedSet, Struct, UUID from ops import Compound, PRIMITIVE, COMPOUND class CodecException(Exception): pass def direct(t): return lambda x: t def map_str(s): for c in s: if ord(c) >= 0x80: return "vbin16" return "str16" class Codec(Packer): ENCODINGS = { bool: direct("boolean"), unicode: direct("str16"), str: map_str, buffer: direct("vbin32"), int: direct("int64"), long: direct("int64"), float: direct("double"), None.__class__: direct("void"), list: direct("list"), tuple: direct("list"), dict: direct("map"), timestamp: direct("datetime"), datetime.datetime: direct("datetime"), UUID: direct("uuid"), Compound: direct("struct32") } def encoding(self, obj): enc = self._encoding(obj.__class__, obj) if enc is None: raise CodecException("no encoding for %r" % obj) return PRIMITIVE[enc] def _encoding(self, klass, obj): if self.ENCODINGS.has_key(klass): return self.ENCODINGS[klass](obj) for base in klass.__bases__: result = self._encoding(base, obj) if result != None: return result def read_primitive(self, type): return getattr(self, "read_%s" % type.NAME)() def write_primitive(self, type, v): getattr(self, "write_%s" % type.NAME)(v) def read_void(self): return None def write_void(self, v): assert v == None def read_bit(self): return True def write_bit(self, b): if not b: raise ValueError(b) def read_uint8(self): return self.unpack("!B") def write_uint8(self, n): if n < 0 or n > 255: raise CodecException("Cannot encode %d as uint8" % n) return self.pack("!B", n) def read_int8(self): return self.unpack("!b") def write_int8(self, n): if n < -128 or n > 127: raise CodecException("Cannot encode %d as int8" % n) self.pack("!b", n) def read_char(self): return self.unpack("!c") def write_char(self, c): self.pack("!c", c) def read_boolean(self): return self.read_uint8() != 0 def write_boolean(self, b): if b: n = 1 else: n = 0 self.write_uint8(n) def read_uint16(self): return self.unpack("!H") def write_uint16(self, n): if n < 0 or n > 65535: raise CodecException("Cannot encode %d as uint16" % n) self.pack("!H", n) def read_int16(self): return self.unpack("!h") def write_int16(self, n): if n < -32768 or n > 32767: raise CodecException("Cannot encode %d as int16" % n) self.pack("!h", n) def read_uint32(self): return self.unpack("!L") def write_uint32(self, n): if n < 0 or n > 4294967295: raise CodecException("Cannot encode %d as uint32" % n) self.pack("!L", n) def read_int32(self): return self.unpack("!l") def write_int32(self, n): if n < -2147483648 or n > 2147483647: raise CodecException("Cannot encode %d as int32" % n) self.pack("!l", n) def read_float(self): return self.unpack("!f") def write_float(self, f): self.pack("!f", f) def read_sequence_no(self): return serial(self.read_uint32()) def write_sequence_no(self, n): self.write_uint32(n.value) def read_uint64(self): return self.unpack("!Q") def write_uint64(self, n): self.pack("!Q", n) def read_int64(self): return self.unpack("!q") def write_int64(self, n): self.pack("!q", n) def read_datetime(self): return timestamp(self.read_uint64()) def write_datetime(self, t): if isinstance(t, datetime.datetime): t = timestamp(t) self.write_uint64(t) def read_double(self): return self.unpack("!d") def write_double(self, d): self.pack("!d", d) def read_vbin8(self): return self.read(self.read_uint8()) def write_vbin8(self, b): if isinstance(b, buffer): b = str(b) self.write_uint8(len(b)) self.write(b) def read_str8(self): return self.read_vbin8().decode("utf8") def 
write_str8(self, s): self.write_vbin8(s.encode("utf8")) def read_str16(self): return self.read_vbin16().decode("utf8") def write_str16(self, s): self.write_vbin16(s.encode("utf8")) def read_str16_latin(self): return self.read_vbin16().decode("iso-8859-15") def write_str16_latin(self, s): self.write_vbin16(s.encode("iso-8859-15")) def read_vbin16(self): return self.read(self.read_uint16()) def write_vbin16(self, b): if isinstance(b, buffer): b = str(b) self.write_uint16(len(b)) self.write(b) def read_sequence_set(self): result = RangedSet() size = self.read_uint16() nranges = size/8 while nranges > 0: lower = self.read_sequence_no() upper = self.read_sequence_no() result.add(lower, upper) nranges -= 1 return result def write_sequence_set(self, ss): size = 8*len(ss.ranges) self.write_uint16(size) for range in ss.ranges: self.write_sequence_no(range.lower) self.write_sequence_no(range.upper) def read_vbin32(self): return self.read(self.read_uint32()) def write_vbin32(self, b): if isinstance(b, buffer): b = str(b) self.write_uint32(len(b)) self.write(b) def read_map(self): sc = StringCodec(self.read_vbin32()) if not sc.encoded: return None count = sc.read_uint32() result = {} while sc.encoded: k = sc.read_str8() code = sc.read_uint8() type = PRIMITIVE[code] v = sc.read_primitive(type) result[k] = v return result def _write_map_elem(self, k, v): type = self.encoding(v) sc = StringCodec() sc.write_str8(k) sc.write_uint8(type.CODE) sc.write_primitive(type, v) return sc.encoded def write_map(self, m): sc = StringCodec() if m is not None: sc.write_uint32(len(m)) sc.write(string.joinfields(map(self._write_map_elem, m.keys(), m.values()), "")) self.write_vbin32(sc.encoded) def read_array(self): sc = StringCodec(self.read_vbin32()) if not sc.encoded: return None type = PRIMITIVE[sc.read_uint8()] count = sc.read_uint32() result = [] while count > 0: result.append(sc.read_primitive(type)) count -= 1 return result def write_array(self, a): sc = StringCodec() if a is not None: if len(a) > 0: type = self.encoding(a[0]) else: type = self.encoding(None) sc.write_uint8(type.CODE) sc.write_uint32(len(a)) for o in a: sc.write_primitive(type, o) self.write_vbin32(sc.encoded) def read_list(self): sc = StringCodec(self.read_vbin32()) if not sc.encoded: return None count = sc.read_uint32() result = [] while count > 0: type = PRIMITIVE[sc.read_uint8()] result.append(sc.read_primitive(type)) count -= 1 return result def write_list(self, l): sc = StringCodec() if l is not None: sc.write_uint32(len(l)) for o in l: type = self.encoding(o) sc.write_uint8(type.CODE) sc.write_primitive(type, o) self.write_vbin32(sc.encoded) def read_struct32(self): size = self.read_uint32() code = self.read_uint16() cls = COMPOUND[code] op = cls() self.read_fields(op) return op def write_struct32(self, value): self.write_compound(value) def read_compound(self, cls): size = self.read_size(cls.SIZE) if cls.CODE is not None: code = self.read_uint16() assert code == cls.CODE op = cls() self.read_fields(op) return op def write_compound(self, op): sc = StringCodec() if op.CODE is not None: sc.write_uint16(op.CODE) sc.write_fields(op) self.write_size(op.SIZE, len(sc.encoded)) self.write(sc.encoded) def read_fields(self, op): flags = 0 for i in range(op.PACK): flags |= (self.read_uint8() << 8*i) for i in range(len(op.FIELDS)): f = op.FIELDS[i] if flags & (0x1 << i): if COMPOUND.has_key(f.type): value = self.read_compound(COMPOUND[f.type]) else: value = getattr(self, "read_%s" % f.type)() setattr(op, f.name, value) def write_fields(self, op): flags 
= 0 for i in range(len(op.FIELDS)): f = op.FIELDS[i] value = getattr(op, f.name) if f.type == "bit": present = value else: present = value != None if present: flags |= (0x1 << i) for i in range(op.PACK): self.write_uint8((flags >> 8*i) & 0xFF) for i in range(len(op.FIELDS)): f = op.FIELDS[i] if flags & (0x1 << i): if COMPOUND.has_key(f.type): enc = self.write_compound else: enc = getattr(self, "write_%s" % f.type) value = getattr(op, f.name) enc(value) def read_size(self, width): if width > 0: attr = "read_uint%d" % (width*8) return getattr(self, attr)() def write_size(self, width, n): if width > 0: attr = "write_uint%d" % (width*8) getattr(self, attr)(n) def read_uuid(self): return UUID(bytes=self.unpack("16s")) def write_uuid(self, s): if isinstance(s, UUID): s = s.bytes self.pack("16s", s) def read_bin128(self): return self.unpack("16s") def write_bin128(self, b): self.pack("16s", b) class StringCodec(Codec): def __init__(self, encoded = ""): self.encoded = encoded def read(self, n): result = self.encoded[:n] self.encoded = self.encoded[n:] return result def write(self, s): self.encoded += s qpid-python-0.22/python/qpid/lexer.py0000644000175000017500000000534411325626650015754 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
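# Usage sketch for the Lexicon/Lexer classes below (illustrative; the token
# names and patterns are invented for the example):
#
#   from qpid.lexer import Lexicon
#
#   spec = Lexicon()
#   spec.define("number", "[0-9]+")
#   spec.define("ws", " +")
#   EOF = spec.eof("eof")
#   lexer = spec.compile()
#
#   for tok in lexer.lex("10 20 30"):
#       if tok.type is EOF:
#           break
#       print tok.type, repr(tok.value)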
# import re class Type: def __init__(self, name, pattern=None): self.name = name self.pattern = pattern def __repr__(self): return self.name class Lexicon: def __init__(self): self.types = [] self._eof = None def define(self, name, pattern): t = Type(name, pattern) self.types.append(t) return t def eof(self, name): t = Type(name) self._eof = t return t def compile(self): types = self.types[:] joined = "|".join(["(%s)" % t.pattern for t in types]) rexp = re.compile(joined) return Lexer(types, self._eof, rexp) class Token: def __init__(self, type, value, input, position): self.type = type self.value = value self.input = input self.position = position def line_info(self): return line_info(self.input, self.position) def __repr__(self): if self.value is None: return repr(self.type) else: return "%s(%s)" % (self.type, self.value) class LexError(Exception): pass def line_info(st, pos): idx = 0 lineno = 1 column = 0 line_pos = 0 while idx < pos: if st[idx] == "\n": lineno += 1 column = 0 line_pos = idx column += 1 idx += 1 end = st.find("\n", line_pos) if end < 0: end = len(st) line = st[line_pos:end] return line, lineno, column class Lexer: def __init__(self, types, eof, rexp): self.types = types self.eof = eof self.rexp = rexp self.byname = {} for t in self.types + [eof]: self.byname[t.name] = t def type(self, name): return self.byname[name] def lex(self, st): pos = 0 while pos < len(st): m = self.rexp.match(st, pos) if m is None: line, ln, col = line_info(st, pos) raise LexError("unrecognized characters line:%s,%s: %s" % (ln, col, line)) else: idx = m.lastindex t = Token(self.types[idx - 1], m.group(idx), st, pos) yield t pos = m.end() yield Token(self.eof, None, st, pos) qpid-python-0.22/python/qpid/sasl.py0000644000175000017500000000606312004253175015567 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
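# Usage sketch (illustrative; when the optional saslwrapper module is not
# installed, Client falls back to the pure-python PlainClient defined below,
# which only supports PLAIN, ANONYMOUS and EXTERNAL; credentials are examples):
#
#   from qpid.sasl import Client, SASLError
#
#   cli = Client()
#   cli.setAttr("username", "guest")
#   cli.setAttr("password", "guest")
#   cli.init()
#   mech, initial = cli.start("PLAIN ANONYMOUS")   # space-separated offer
#   # send mech/initial to the peer, then feed any server challenge back in:
#   response = cli.step("")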
# import socket class SASLError(Exception): pass class WrapperClient: def __init__(self): self._cli = _Client() def setAttr(self, name, value): status = self._cli.setAttr(str(name), str(value)) if status and name == 'username': status = self._cli.setAttr('externaluser', str(value)) if not status: raise SASLError(self._cli.getError()) def init(self): status = self._cli.init() if not status: raise SASLError(self._cli.getError()) def start(self, mechanisms): status, mech, initial = self._cli.start(str(mechanisms)) if status: return mech, initial else: raise SASLError(self._cli.getError()) def step(self, challenge): status, response = self._cli.step(challenge) if status: return response else: raise SASLError(self._cli.getError()) def encode(self, bytes): status, result = self._cli.encode(bytes) if status: return result else: raise SASLError(self._cli.getError()) def decode(self, bytes): status, result = self._cli.decode(bytes) if status: return result else: raise SASLError(self._cli.getError()) def auth_username(self): status, result = self._cli.getUserId() if status: return result else: raise SASLError(self._cli.getError()) class PlainClient: def __init__(self): self.attrs = {} def setAttr(self, name, value): self.attrs[name] = value def init(self): pass def start(self, mechanisms): mechs = mechanisms.split() if self.attrs.get("username") and self.attrs.get("password") and "PLAIN" in mechs: return "PLAIN", "\0%s\0%s" % (self.attrs.get("username"), self.attrs.get("password")) elif "ANONYMOUS" in mechs: return "ANONYMOUS", "%s@%s" % (self.attrs.get("username"), socket.gethostname()) elif "EXTERNAL" in mechs: return "EXTERNAL", "%s" % (self.attrs.get("username")) else: raise SASLError("sasl negotiation failed: no mechanism agreed") def step(self, challenge): pass def encode(self, bytes): return bytes def decode(self, bytes): return bytes def auth_username(self): return self.attrs.get("username") try: from saslwrapper import Client as _Client Client = WrapperClient except ImportError: Client = PlainClient qpid-python-0.22/python/qpid/delegate.py0000644000175000017500000000312111003407066016365 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ Delegate implementation intended for use with the peer module. 
""" import threading, inspect, traceback, sys from connection08 import Method, Request, Response def _handler_name(method): return "%s_%s" % (method.klass.name, method.name) class Delegate: def __init__(self): self.handlers = {} self.invokers = {} def __call__(self, channel, frame): method = frame.method try: handler = self.handlers[method] except KeyError: name = _handler_name(method) handler = getattr(self, name) self.handlers[method] = handler try: return handler(channel, frame) except: print >> sys.stderr, "Error in handler: %s\n\n%s" % \ (_handler_name(method), traceback.format_exc()) def closed(self, reason): print "Connection closed: %s" % reason qpid-python-0.22/python/qpid/queue.py0000644000175000017500000000531111172413542015745 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module augments the standard python multithreaded Queue implementation to add a close() method so that threads blocking on the content of a queue can be notified if the queue is no longer in use. """ from Queue import Queue as BaseQueue, Empty, Full from threading import Thread from exceptions import Closed class Queue(BaseQueue): END = object() STOP = object() def __init__(self, *args, **kwargs): BaseQueue.__init__(self, *args, **kwargs) self.error = None self.listener = None self.exc_listener = None self.thread = None def close(self, error = None): self.error = error self.put(Queue.END) if self.thread is not None: self.thread.join() self.thread = None def get(self, block = True, timeout = None): result = BaseQueue.get(self, block, timeout) if result == Queue.END: # this guarantees that any other waiting threads or any future # calls to get will also result in a Closed exception self.put(Queue.END) raise Closed(self.error) else: return result def listen(self, listener, exc_listener = None): if listener is None and exc_listener is not None: raise ValueError("cannot set exception listener without setting listener") if listener is None: if self.thread is not None: self.put(Queue.STOP) # loop and timed join permit keyboard interrupts to work while self.thread.isAlive(): self.thread.join(3) self.thread = None self.listener = listener self.exc_listener = exc_listener if listener is not None and self.thread is None: self.thread = Thread(target = self.run) self.thread.setDaemon(True) self.thread.start() def run(self): while True: try: o = self.get() if o == Queue.STOP: break self.listener(o) except Closed, e: if self.exc_listener is not None: self.exc_listener(e) break qpid-python-0.22/python/qpid/datatypes.py0000644000175000017500000002245411442422244016625 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import threading, struct, datetime, time from exceptions import Timeout class Struct: def __init__(self, _type, *args, **kwargs): if len(args) > len(_type.fields): raise TypeError("%s() takes at most %s arguments (%s given)" % (_type.name, len(_type.fields), len(args))) self._type = _type idx = 0 for field in _type.fields: if idx < len(args): arg = args[idx] if kwargs.has_key(field.name): raise TypeError("%s() got multiple values for keyword argument '%s'" % (_type.name, field.name)) elif kwargs.has_key(field.name): arg = kwargs.pop(field.name) else: arg = field.default() setattr(self, field.name, arg) idx += 1 if kwargs: unexpected = kwargs.keys()[0] raise TypeError("%s() got an unexpected keyword argument '%s'" % (_type.name, unexpected)) def __getitem__(self, name): return getattr(self, name) def __setitem__(self, name, value): if not hasattr(self, name): raise AttributeError("'%s' object has no attribute '%s'" % (self._type.name, name)) setattr(self, name, value) def __repr__(self): fields = [] for f in self._type.fields: v = self[f.name] if f.type.is_present(v): fields.append("%s=%r" % (f.name, v)) return "%s(%s)" % (self._type.name, ", ".join(fields)) class Message: def __init__(self, *args): if args: self.body = args[-1] else: self.body = None if len(args) > 1: self.headers = list(args[:-1]) else: self.headers = None self.id = None def has(self, name): return self.get(name) != None def get(self, name): if self.headers: for h in self.headers: if h.NAME == name: return h return None def set(self, header): if self.headers is None: self.headers = [] idx = 0 while idx < len(self.headers): if self.headers[idx].NAME == header.NAME: self.headers[idx] = header return idx += 1 self.headers.append(header) def clear(self, name): idx = 0 while idx < len(self.headers): if self.headers[idx].NAME == name: del self.headers[idx] return idx += 1 def __repr__(self): args = [] if self.headers: args.extend(map(repr, self.headers)) if self.body: args.append(repr(self.body)) if self.id is not None: args.append("id=%s" % self.id) return "Message(%s)" % ", ".join(args) def serial(o): if isinstance(o, Serial): return o else: return Serial(o) class Serial: def __init__(self, value): self.value = value & 0xFFFFFFFFL def __hash__(self): return hash(self.value) def __cmp__(self, other): if other.__class__ not in (int, long, Serial): return 1 other = serial(other) delta = (self.value - other.value) & 0xFFFFFFFFL neg = delta & 0x80000000L mag = delta & 0x7FFFFFFF if neg: return -mag else: return mag def __add__(self, other): return Serial(self.value + other) def __sub__(self, other): if isinstance(other, Serial): return self.value - other.value else: return Serial(self.value - other) def __repr__(self): return "serial(%s)" % self.value def __str__(self): return str(self.value) class Range: def __init__(self, lower, upper = None): self.lower = 
serial(lower) if upper is None: self.upper = self.lower else: self.upper = serial(upper) def __contains__(self, n): return self.lower <= n and n <= self.upper def __iter__(self): i = self.lower while i <= self.upper: yield i i += 1 def touches(self, r): # XXX: are we doing more checks than we need? return (self.lower - 1 in r or self.upper + 1 in r or r.lower - 1 in self or r.upper + 1 in self or self.lower in r or self.upper in r or r.lower in self or r.upper in self) def span(self, r): return Range(min(self.lower, r.lower), max(self.upper, r.upper)) def intersect(self, r): lower = max(self.lower, r.lower) upper = min(self.upper, r.upper) if lower > upper: return None else: return Range(lower, upper) def __repr__(self): return "%s-%s" % (self.lower, self.upper) class RangedSet: def __init__(self, *args): self.ranges = [] for n in args: self.add(n) def __contains__(self, n): for r in self.ranges: if n in r: return True return False def add_range(self, range): idx = 0 while idx < len(self.ranges): r = self.ranges[idx] if range.touches(r): del self.ranges[idx] range = range.span(r) elif range.upper < r.lower: self.ranges.insert(idx, range) return else: idx += 1 self.ranges.append(range) def add(self, lower, upper = None): self.add_range(Range(lower, upper)) def empty(self): for r in self.ranges: if r.lower <= r.upper: return False return True def max(self): if self.ranges: return self.ranges[-1].upper else: return None def min(self): if self.ranges: return self.ranges[0].lower else: return None def __iter__(self): return iter(self.ranges) def __repr__(self): return str(self.ranges) class Future: def __init__(self, initial=None, exception=Exception): self.value = initial self._error = None self._set = threading.Event() self.exception = exception def error(self, error): self._error = error self._set.set() def set(self, value): self.value = value self._set.set() def get(self, timeout=None): self._set.wait(timeout) if self._set.isSet(): if self._error != None: raise self.exception(self._error) return self.value else: raise Timeout() def is_set(self): return self._set.isSet() try: from uuid import uuid4 from uuid import UUID except ImportError: class UUID: def __init__(self, hex=None, bytes=None): if [hex, bytes].count(None) != 1: raise TypeError("need one of hex or bytes") if bytes is not None: self.bytes = bytes elif hex is not None: fields=hex.split("-") fields[4:5] = [fields[4][:4], fields[4][4:]] self.bytes = struct.pack("!LHHHHL", *[int(x,16) for x in fields]) def __cmp__(self, other): if isinstance(other, UUID): return cmp(self.bytes, other.bytes) else: return -1 def __str__(self): return "%08x-%04x-%04x-%04x-%04x%08x" % struct.unpack("!LHHHHL", self.bytes) def __repr__(self): return "UUID(%r)" % str(self) def __hash__(self): return self.bytes.__hash__() import os, random, socket, time rand = random.Random() rand.seed((os.getpid(), time.time(), socket.gethostname())) def random_uuid(): bytes = [rand.randint(0, 255) for i in xrange(16)] # From RFC4122, the version bits are set to 0100 bytes[7] &= 0x0F bytes[7] |= 0x40 # From RFC4122, the top two bits of byte 8 get set to 01 bytes[8] &= 0x3F bytes[8] |= 0x80 return "".join(map(chr, bytes)) def uuid4(): return UUID(bytes=random_uuid()) def parseUUID(str): return UUID(hex=str) class timestamp(float): def __new__(cls, obj=None): if obj is None: obj = time.time() elif isinstance(obj, datetime.datetime): obj = time.mktime(obj.timetuple()) + 1e-6 * obj.microsecond return super(timestamp, cls).__new__(cls, obj) def datetime(self): return 
datetime.datetime.fromtimestamp(self) def __add__(self, other): if isinstance(other, datetime.timedelta): return timestamp(self.datetime() + other) else: return timestamp(float(self) + other) def __sub__(self, other): if isinstance(other, datetime.timedelta): return timestamp(self.datetime() - other) else: return timestamp(float(self) - other) def __radd__(self, other): if isinstance(other, datetime.timedelta): return timestamp(self.datetime() + other) else: return timestamp(other + float(self)) def __rsub__(self, other): if isinstance(other, datetime.timedelta): return timestamp(self.datetime() - other) else: return timestamp(other - float(self)) def __neg__(self): return timestamp(-float(self)) def __pos__(self): return self def __abs__(self): return timestamp(abs(float(self))) def __repr__(self): return "timestamp(%r)" % float(self) qpid-python-0.22/python/qpid/harness.py0000644000175000017500000000146511240310343016261 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # class Skipped(Exception): pass qpid-python-0.22/python/qpid/validator.py0000644000175000017500000000504411352153330016605 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
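# Usage sketch for the validator combinators below (illustrative; the schema
# and the data being validated are invented for the example):
#
#   from qpid.validator import Context, Map, Types, Values
#
#   schema = Map({"name": Types(str),
#                 "count": Types(int, long),
#                 "mode": Values("browse", "consume")})
#
#   err = schema.validate({"name": "q", "count": 1, "mode": "browse"}, Context())
#   assert err is None                 # valid input yields no error string
#   err = schema.validate({"name": 3}, Context())
#   # -> "name: 3 is not a str"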
# class Context: def __init__(self): self.containers = [] def push(self, o): self.containers.append(o) def pop(self): return self.containers.pop() class Values: def __init__(self, *values): self.values = values def validate(self, o, ctx): if not o in self.values: return "%s not in %s" % (o, self.values) def __str__(self): return self.value class Types: def __init__(self, *types): self.types = types def validate(self, o, ctx): for t in self.types: if isinstance(o, t): return if len(self.types) == 1: return "%s is not a %s" % (o, self.types[0].__name__) else: return "%s is not one of: %s" % (o, ", ".join([t.__name__ for t in self.types])) class List: def __init__(self, condition): self.condition = condition def validate(self, o, ctx): if not isinstance(o, list): return "%s is not a list" % o ctx.push(o) for v in o: err = self.condition.validate(v, ctx) if err: return err class Map: def __init__(self, map, restricted=True): self.map = map self.restricted = restricted def validate(self, o, ctx): errors = [] if not hasattr(o, "get"): return "%s is not a map" % o ctx.push(o) for k, t in self.map.items(): v = o.get(k) if v is not None: err = t.validate(v, ctx) if err: errors.append("%s: %s" % (k, err)) if self.restricted: for k in o: if not k in self.map: errors.append("%s: illegal key" % k) ctx.pop() if errors: return ", ".join(errors) class And: def __init__(self, *conditions): self.conditions = conditions def validate(self, o, ctx): for c in self.conditions: err = c.validate(o, ctx) if err: return err qpid-python-0.22/python/qpid/compat.py0000644000175000017500000000650211352222762016111 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
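# Usage sketch for the waiter helpers below (illustrative): selectable_waiter()
# returns a PipeWaiter on POSIX and a SockWaiter on win32/cygwin, giving an
# I/O thread a selectable handle that another thread can poke.
#
#   from qpid.compat import selectable_waiter
#
#   w = selectable_waiter()
#   # thread A: block until woken or until the timeout expires
#   if w.wait(timeout=5.0):
#       pass                   # the wakeup byte has been drained
#   # thread B: wake thread A
#   w.wakeup()
#   w.close()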
# import sys try: set = set except NameError: from sets import Set as set try: from socket import SHUT_RDWR except ImportError: SHUT_RDWR = 2 try: from traceback import format_exc except ImportError: import traceback def format_exc(): return "".join(traceback.format_exception(*sys.exc_info())) if tuple(sys.version_info[0:2]) < (2, 4): from select import select as old_select def select(rlist, wlist, xlist, timeout=None): return old_select(list(rlist), list(wlist), list(xlist), timeout) else: from select import select class BaseWaiter: def wakeup(self): self._do_write() def wait(self, timeout=None): if timeout is not None: ready, _, _ = select([self], [], [], timeout) else: ready = True if ready: self._do_read() return True else: return False def reading(self): return True def readable(self): self._do_read() if sys.platform in ('win32', 'cygwin'): import socket class SockWaiter(BaseWaiter): def __init__(self, read_sock, write_sock): self.read_sock = read_sock self.write_sock = write_sock def _do_write(self): self.write_sock.send("\0") def _do_read(self): self.read_sock.recv(65536) def fileno(self): return self.read_sock.fileno() def close(self): if self.write_sock is not None: self.write_sock.close() self.write_sock = None self.read_sock.close() self.read_sock = None def __del__(self): self.close() def __repr__(self): return "SockWaiter(%r, %r)" % (self.read_sock, self.write_sock) def selectable_waiter(): listener = socket.socket() listener.bind(('', 0)) listener.listen(1) _, port = listener.getsockname() write_sock = socket.socket() write_sock.connect(("127.0.0.1", port)) read_sock, _ = listener.accept() listener.close() return SockWaiter(read_sock, write_sock) else: import os class PipeWaiter(BaseWaiter): def __init__(self): self.read_fd, self.write_fd = os.pipe() def _do_write(self): os.write(self.write_fd, "\0") def _do_read(self): os.read(self.read_fd, 65536) def fileno(self): return self.read_fd def close(self): if self.write_fd is not None: os.close(self.write_fd) self.write_fd = None os.close(self.read_fd) self.read_fd = None def __del__(self): self.close() def __repr__(self): return "PipeWaiter(%r, %r)" % (self.read_fd, self.write_fd) def selectable_waiter(): return PipeWaiter() qpid-python-0.22/python/qpid/ops.py0000644000175000017500000002012511405705173015425 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
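#
# A rough, assumed sketch of what this module exposes once the spec file has
# been loaded (added for illustration; not part of the original source). The
# generated classes are reachable as module attributes and through the
# registries keyed by pythonized name and by wire code; the exact names depend
# on the amqp spec configured in specs_config, so "message_transfer" below is
# an assumption based on the 0-10 spec:
#
#   from qpid import ops
#   cls = ops.COMMANDS.get("message_transfer")
#   if cls is not None:
#       cmd = cls(destination="amq.direct", sync=True)
#       print cmd.args()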
# import os, mllib, cPickle as pickle, sys from util import fill class Primitive(object): pass class Enum(object): # XXX: for backwards compatibility def values(cls): print >> sys.stderr, "warning, please use .VALUES instead of .values()" return cls.VALUES # we can't use the backport preprocessor here because this code gets # called by setup.py values = classmethod(values) class Field: def __init__(self, name, type, default=None): self.name = name self.type = type self.default = default def __repr__(self): return "%s: %s" % (self.name, self.type) class Compound(object): UNENCODED=[] def __init__(self, *args, **kwargs): args = list(args) for f in self.ARGS: if args: a = args.pop(0) else: a = kwargs.pop(f.name, f.default) setattr(self, f.name, a) if args: raise TypeError("%s takes at most %s arguments (%s given))" % (self.__class__.__name__, len(self.ARGS), len(self.ARGS) + len(args))) if kwargs: raise TypeError("got unexpected keyword argument '%s'" % kwargs.keys()[0]) def fields(self): result = {} for f in self.FIELDS: result[f.name] = getattr(self, f.name) return result def args(self): result = {} for f in self.ARGS: result[f.name] = getattr(self, f.name) return result def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) def dispatch(self, target, *args): handler = "do_%s" % self.NAME getattr(target, handler)(self, *args) def __repr__(self, extras=()): return "%s(%s)" % (self.__class__.__name__, ", ".join(["%s=%r" % (f.name, getattr(self, f.name)) for f in self.ARGS if getattr(self, f.name) != f.default])) class Command(Compound): UNENCODED=[Field("channel", "uint16", 0), Field("id", "sequence-no", None), Field("sync", "bit", False), Field("headers", None, None), Field("payload", None, None)] class Control(Compound): UNENCODED=[Field("channel", "uint16", 0)] def pythonize(st): if st is None: return None else: return str(st.replace("-", "_")) def pydoc(op, children=()): doc = "\n\n".join([fill(p.text(), 0) for p in op.query["doc"]]) for ch in children: doc += "\n\n " + pythonize(ch["@name"]) + " -- " + str(ch["@label"]) ch_descs ="\n\n".join([fill(p.text(), 4) for p in ch.query["doc"]]) if ch_descs: doc += "\n\n" + ch_descs return doc def studly(st): return "".join([p.capitalize() for p in st.split("-")]) def klass(nd): while nd.parent is not None: if hasattr(nd.parent, "name") and nd.parent.name == "class": return nd.parent else: nd = nd.parent def included(nd): cls = klass(nd) if cls is None: return True else: return cls["@name"] not in ("file", "stream") def num(s): if s: return int(s, 0) def code(nd): c = num(nd["@code"]) if c is None: return None else: cls = klass(nd) if cls is None: return c else: return c | (num(cls["@code"]) << 8) def default(f): if f["@type"] == "bit": return False else: return None def make_compound(decl, base, domains): dict = {} fields = decl.query["field"] dict["__doc__"] = pydoc(decl, fields) dict["NAME"] = pythonize(decl["@name"]) dict["SIZE"] = num(decl["@size"]) dict["CODE"] = code(decl) dict["PACK"] = num(decl["@pack"]) dict["FIELDS"] = [Field(pythonize(f["@name"]), resolve(f, domains), default(f)) for f in fields] dict["ARGS"] = dict["FIELDS"] + base.UNENCODED return str(studly(decl["@name"])), (base,), dict def make_restricted(decl, domains): name = pythonize(decl["@name"]) dict = {} choices = decl.query["choice"] dict["__doc__"] = pydoc(decl, choices) dict["NAME"] = name dict["TYPE"] = str(decl.parent["@type"]) values = [] for ch in choices: val = int(ch["@value"], 0) 
dict[pythonize(ch["@name"])] = val values.append(val) dict["VALUES"] = values return name, (Enum,), dict def make_type(decl, domains): name = pythonize(decl["@name"]) dict = {} dict["__doc__"] = pydoc(decl) dict["NAME"] = name dict["CODE"] = code(decl) return str(studly(decl["@name"])), (Primitive,), dict def make_command(decl, domains): decl.set_attr("name", "%s-%s" % (decl.parent["@name"], decl["@name"])) decl.set_attr("size", "0") decl.set_attr("pack", "2") name, bases, dict = make_compound(decl, Command, domains) dict["RESULT"] = pythonize(decl["result/@type"]) or pythonize(decl["result/struct/@name"]) return name, bases, dict def make_control(decl, domains): decl.set_attr("name", "%s-%s" % (decl.parent["@name"], decl["@name"])) decl.set_attr("size", "0") decl.set_attr("pack", "2") return make_compound(decl, Control, domains) def make_struct(decl, domains): return make_compound(decl, Compound, domains) def make_enum(decl, domains): decl.set_attr("name", decl.parent["@name"]) return make_restricted(decl, domains) vars = globals() def make(nd, domains): return vars["make_%s" % nd.name](nd, domains) def qualify(nd, field="@name"): cls = klass(nd) if cls is None: return pythonize(nd[field]) else: return pythonize("%s.%s" % (cls["@name"], nd[field])) def resolve(nd, domains): candidates = qualify(nd, "@type"), pythonize(nd["@type"]) for c in candidates: if domains.has_key(c): while domains.has_key(c): c = domains[c] return c else: return c def load_types_from_xml(file): spec = mllib.xml_parse(file) domains = dict([(qualify(d), pythonize(d["@type"])) for d in spec.query["amqp/domain", included] + \ spec.query["amqp/class/domain", included]]) type_decls = \ spec.query["amqp/class/command", included] + \ spec.query["amqp/class/control", included] + \ spec.query["amqp/class/command/result/struct", included] + \ spec.query["amqp/class/struct", included] + \ spec.query["amqp/class/domain/enum", included] + \ spec.query["amqp/domain/enum", included] + \ spec.query["amqp/type"] types = [make(nd, domains) for nd in type_decls] return types def load_types(file): base, ext = os.path.splitext(file) pclfile = "%s.pcl" % base if os.path.exists(pclfile) and \ os.path.getmtime(pclfile) > os.path.getmtime(file): f = open(pclfile, "rb") types = pickle.load(f) f.close() else: types = load_types_from_xml(file) if os.access(os.path.dirname(os.path.abspath(pclfile)), os.W_OK): f = open(pclfile, "wb") pickle.dump(types, f) f.close() return types from specs_config import amqp_spec as file types = load_types(file) ENUMS = {} PRIMITIVE = {} COMPOUND = {} COMMANDS = {} CONTROLS = {} for name, bases, _dict in types: t = type(name, bases, _dict) vars[name] = t if issubclass(t, Command): COMMANDS[t.NAME] = t COMMANDS[t.CODE] = t elif issubclass(t, Control): CONTROLS[t.NAME] = t CONTROLS[t.CODE] = t elif issubclass(t, Compound): COMPOUND[t.NAME] = t if t.CODE is not None: COMPOUND[t.CODE] = t elif issubclass(t, Primitive): PRIMITIVE[t.NAME] = t PRIMITIVE[t.CODE] = t elif issubclass(t, Enum): ENUMS[t.NAME] = t qpid-python-0.22/python/qpid/peer.py0000644000175000017500000003342011326041427015556 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module contains a skeletal peer implementation useful for implementing an AMQP server, client, or proxy. The peer implementation sorts incoming frames to their intended channels, and dispatches incoming method frames to a delegate. """ import thread, threading, traceback, socket, sys, logging from connection08 import EOF, Method, Header, Body, Request, Response, VersionError from message import Message from queue import Queue, Closed as QueueClosed from content import Content from cStringIO import StringIO from time import time from exceptions import Closed class Sequence: def __init__(self, start, step = 1): # we should keep start for wrap around self._next = start self.step = step self.lock = thread.allocate_lock() def next(self): self.lock.acquire() try: result = self._next self._next += self.step return result finally: self.lock.release() class Peer: def __init__(self, conn, delegate, channel_factory=None): self.conn = conn self.delegate = delegate self.outgoing = Queue(0) self.work = Queue(0) self.channels = {} self.lock = thread.allocate_lock() if channel_factory: self.channel_factory = channel_factory else: self.channel_factory = Channel def channel(self, id): self.lock.acquire() try: try: ch = self.channels[id] except KeyError: ch = self.channel_factory(id, self.outgoing, self.conn.spec) self.channels[id] = ch finally: self.lock.release() return ch def start(self): thread.start_new_thread(self.writer, ()) thread.start_new_thread(self.reader, ()) thread.start_new_thread(self.worker, ()) def fatal(self, message=None): """Call when an unexpected exception occurs that will kill a thread.""" if message: print >> sys.stderr, message self.closed("Fatal error: %s\n%s" % (message or "", traceback.format_exc())) def reader(self): try: while True: try: frame = self.conn.read() except EOF, e: self.work.close() break ch = self.channel(frame.channel) ch.receive(frame, self.work) except VersionError, e: self.closed(e) except: self.fatal() def closed(self, reason): # We must close the delegate first because closing channels # may wake up waiting threads and we don't want them to see # the delegate as open. 
self.delegate.closed(reason) for ch in self.channels.values(): ch.closed(reason) def writer(self): try: while True: try: message = self.outgoing.get() self.conn.write(message) except socket.error, e: self.closed(e) break self.conn.flush() except: self.fatal() def worker(self): try: while True: queue = self.work.get() frame = queue.get() channel = self.channel(frame.channel) if frame.method_type.content: content = read_content(queue) else: content = None self.delegate(channel, Message(channel, frame, content)) except QueueClosed: self.closed("worker closed") except: self.fatal() class Requester: def __init__(self, writer): self.write = writer self.sequence = Sequence(1) self.mark = 0 # request_id -> listener self.outstanding = {} def request(self, method, listener, content = None): frame = Request(self.sequence.next(), self.mark, method) self.outstanding[frame.id] = listener self.write(frame, content) def receive(self, channel, frame): listener = self.outstanding.pop(frame.request_id) listener(channel, frame) class Responder: def __init__(self, writer): self.write = writer self.sequence = Sequence(1) def respond(self, method, batch, request): if isinstance(request, Method): self.write(method) else: # allow batching from frame at either end if batch<0: frame = Response(self.sequence.next(), request.id+batch, -batch, method) else: frame = Response(self.sequence.next(), request.id, batch, method) self.write(frame) class Channel: def __init__(self, id, outgoing, spec): self.id = id self.outgoing = outgoing self.spec = spec self.incoming = Queue(0) self.responses = Queue(0) self.queue = None self._closed = False self.reason = None self.requester = Requester(self.write) self.responder = Responder(self.write) self.completion = OutgoingCompletion() self.incoming_completion = IncomingCompletion(self) self.futures = {} self.control_queue = Queue(0)#used for incoming methods that appas may want to handle themselves self.invoker = self.invoke_method self.use_execution_layer = (spec.major == 0 and spec.minor == 10) or (spec.major == 99 and spec.minor == 0) self.synchronous = True def closed(self, reason): if self._closed: return self._closed = True self.reason = reason self.incoming.close() self.responses.close() self.completion.close() self.incoming_completion.reset() for f in self.futures.values(): f.put_response(self, reason) def write(self, frame, content = None): if self._closed: raise Closed(self.reason) frame.channel = self.id self.outgoing.put(frame) if (isinstance(frame, (Method, Request)) and content == None and frame.method_type.content): content = Content() if content != None: self.write_content(frame.method_type.klass, content) def write_content(self, klass, content): header = Header(klass, content.weight(), content.size(), content.properties) self.write(header) for child in content.children: self.write_content(klass, child) # should split up if content.body exceeds max frame size if content.body: self.write(Body(content.body)) def receive(self, frame, work): if isinstance(frame, Method): if frame.method.response: self.queue = self.responses else: self.queue = self.incoming work.put(self.incoming) elif isinstance(frame, Request): self.queue = self.incoming work.put(self.incoming) elif isinstance(frame, Response): self.requester.receive(self, frame) if frame.method_type.content: self.queue = self.responses return self.queue.put(frame) def queue_response(self, channel, frame): channel.responses.put(frame.method) def request(self, method, listener, content = None): 
self.requester.request(method, listener, content) def respond(self, method, batch, request): self.responder.respond(method, batch, request) def invoke(self, type, args, kwargs): if (type.klass.name in ["channel", "session"]) and (type.name in ["close", "open", "closed"]): self.completion.reset() self.incoming_completion.reset() self.completion.next_command(type) content = kwargs.pop("content", None) frame = Method(type, type.arguments(*args, **kwargs)) return self.invoker(frame, content) # used for 0-9 def invoke_reliable(self, frame, content = None): if not self.synchronous: future = Future() self.request(frame, future.put_response, content) if not frame.method.responses: return None else: return future self.request(frame, self.queue_response, content) if not frame.method.responses: if self.use_execution_layer and frame.method_type.is_l4_command(): self.execution_sync() self.completion.wait() if self._closed: raise Closed(self.reason) return None try: resp = self.responses.get() if resp.method_type.content: return Message(self, resp, read_content(self.responses)) else: return Message(self, resp) except QueueClosed, e: if self._closed: raise Closed(self.reason) else: raise e # used for 0-8 and 0-10 def invoke_method(self, frame, content = None): if frame.method.result: cmd_id = self.completion.command_id future = Future() self.futures[cmd_id] = future self.write(frame, content) try: # here we depend on all nowait fields being named nowait f = frame.method.fields.byname["nowait"] nowait = frame.args[frame.method.fields.index(f)] except KeyError: nowait = False try: if not nowait and frame.method.responses: resp = self.responses.get() if resp.method.content: content = read_content(self.responses) else: content = None if resp.method in frame.method.responses: return Message(self, resp, content) else: raise ValueError(resp) elif frame.method.result: if self.synchronous: fr = future.get_response(timeout=10) if self._closed: raise Closed(self.reason) return fr else: return future elif self.synchronous and not frame.method.response \ and self.use_execution_layer and frame.method.is_l4_command(): self.execution_sync() completed = self.completion.wait(timeout=10) if self._closed: raise Closed(self.reason) if not completed: self.closed("Timed-out waiting for completion of %s" % frame) except QueueClosed, e: if self._closed: raise Closed(self.reason) else: raise e def __getattr__(self, name): type = self.spec.method(name) if type == None: raise AttributeError(name) method = lambda *args, **kwargs: self.invoke(type, args, kwargs) self.__dict__[name] = method return method def read_content(queue): header = queue.get() children = [] for i in range(header.weight): children.append(read_content(queue)) buf = StringIO() eof = header.eof while not eof: body = queue.get() eof = body.eof content = body.content buf.write(content) return Content(buf.getvalue(), children, header.properties.copy()) class Future: def __init__(self): self.completed = threading.Event() def put_response(self, channel, response): self.response = response self.completed.set() def get_response(self, timeout=None): self.completed.wait(timeout) if self.completed.isSet(): return self.response else: return None def is_complete(self): return self.completed.isSet() class OutgoingCompletion: """ Manages completion of outgoing commands i.e. 
command sent by this peer """ def __init__(self): self.condition = threading.Condition() #todo, implement proper wraparound self.sequence = Sequence(0) #issues ids for outgoing commands self.command_id = -1 #last issued id self.mark = -1 #commands up to this mark are known to be complete self._closed = False def next_command(self, method): #the following test is a hack until the track/sub-channel is available if method.is_l4_command(): self.command_id = self.sequence.next() def reset(self): self.sequence = Sequence(0) #reset counter def close(self): self.reset() self.condition.acquire() try: self._closed = True self.condition.notifyAll() finally: self.condition.release() def complete(self, mark): self.condition.acquire() try: self.mark = mark #print "set mark to %s [%s] " % (self.mark, self) self.condition.notifyAll() finally: self.condition.release() def wait(self, point_of_interest=-1, timeout=None): if point_of_interest == -1: point_of_interest = self.command_id start_time = time() remaining = timeout self.condition.acquire() try: while not self._closed and point_of_interest > self.mark: #print "waiting for %s, mark = %s [%s]" % (point_of_interest, self.mark, self) self.condition.wait(remaining) if not self._closed and point_of_interest > self.mark and timeout: if (start_time + timeout) < time(): break else: remaining = timeout - (time() - start_time) finally: self.condition.release() return point_of_interest <= self.mark class IncomingCompletion: """ Manages completion of incoming commands i.e. command received by this peer """ def __init__(self, channel): self.sequence = Sequence(0) #issues ids for incoming commands self.mark = -1 #id of last command of whose completion notification was sent to the other peer self.channel = channel def reset(self): self.sequence = Sequence(0) #reset counter def complete(self, mark, cumulative=True): if cumulative: if mark > self.mark: self.mark = mark self.channel.execution_complete(cumulative_execution_mark=self.mark) else: #TODO: record and manage the ranges properly range = [mark, mark] if (self.mark == -1):#hack until wraparound is implemented self.channel.execution_complete(cumulative_execution_mark=0xFFFFFFFFL, ranged_execution_set=range) else: self.channel.execution_complete(cumulative_execution_mark=self.mark, ranged_execution_set=range) qpid-python-0.22/python/qpid/generator.py0000644000175000017500000000322611240310343016601 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
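#
# A brief, assumed sketch of how the invoker classes built below are used
# (added for illustration; not part of the original source). generate() maps
# every named command/control from ops to a method that forwards to
# self.invoke(op, args, kwargs), so a subclass only has to supply invoke();
# "message_transfer" is an assumed 0-10 command name:
#
#   class Tracer(command_invoker()):
#       def invoke(self, op, args, kwargs):
#           print "would send", op.NAME, args, kwargs
#
#   Tracer().message_transfer(destination="amq.direct")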
# import sys from ops import * def METHOD(module, op): method = lambda self, *args, **kwargs: self.invoke(op, args, kwargs) if sys.version_info[:2] > (2, 3): method.__name__ = op.__name__ method.__doc__ = op.__doc__ method.__module__ = module return method def generate(module, operations): dict = {} for name, enum in ENUMS.items(): if isinstance(name, basestring): dict[name] = enum for name, op in COMPOUND.items(): if isinstance(name, basestring): dict[name] = METHOD(module, op) for name, op in operations.items(): if isinstance(name, basestring): dict[name] = METHOD(module, op) return dict def invoker(name, operations): return type(name, (), generate(invoker.__module__, operations)) def command_invoker(): return invoker("CommandInvoker", COMMANDS) def control_invoker(): return invoker("ControlInvoker", CONTROLS) qpid-python-0.22/python/qpid/selector.py0000644000175000017500000000730512005611322016436 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import atexit, time, errno from compat import select, set, selectable_waiter from threading import Thread, Lock class Acceptor: def __init__(self, sock, handler): self.sock = sock self.handler = handler def fileno(self): return self.sock.fileno() def reading(self): return True def writing(self): return False def readable(self): sock, addr = self.sock.accept() self.handler(sock) class Selector: lock = Lock() DEFAULT = None @staticmethod def default(): Selector.lock.acquire() try: if Selector.DEFAULT is None: sel = Selector() atexit.register(sel.stop) sel.start() Selector.DEFAULT = sel return Selector.DEFAULT finally: Selector.lock.release() def __init__(self): self.selectables = set() self.reading = set() self.writing = set() self.waiter = selectable_waiter() self.reading.add(self.waiter) self.stopped = False self.thread = None def wakeup(self): self.waiter.wakeup() def register(self, selectable): self.selectables.add(selectable) self.modify(selectable) def _update(self, selectable): if selectable.reading(): self.reading.add(selectable) else: self.reading.discard(selectable) if selectable.writing(): self.writing.add(selectable) else: self.writing.discard(selectable) return selectable.timing() def modify(self, selectable): self._update(selectable) self.wakeup() def unregister(self, selectable): self.reading.discard(selectable) self.writing.discard(selectable) self.selectables.discard(selectable) self.wakeup() def start(self): self.stopped = False self.thread = Thread(target=self.run) self.thread.setDaemon(True) self.thread.start(); def run(self): while not self.stopped: wakeup = None for sel in self.selectables.copy(): t = self._update(sel) if t is not None: if wakeup is None: wakeup = t else: wakeup = min(wakeup, t) rd = [] wr = [] ex = [] while True: try: if wakeup is None: timeout = None else: timeout = max(0, 
wakeup - time.time()) rd, wr, ex = select(self.reading, self.writing, (), timeout) break except Exception, (err, strerror): # Repeat the select call if we were interrupted. if err == errno.EINTR: continue else: raise for sel in wr: if sel.writing(): sel.writeable() for sel in rd: if sel.reading(): sel.readable() now = time.time() for sel in self.selectables.copy(): w = sel.timing() if w is not None and now > w: sel.timeout() def stop(self, timeout=None): self.stopped = True self.wakeup() self.thread.join(timeout) self.thread = None qpid-python-0.22/python/qpid/message.py0000644000175000017500000000462711202365716016261 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from connection08 import Method, Request class Message: def __init__(self, channel, frame, content = None): self.channel = channel self.frame = frame self.method = frame.method_type self.content = content if self.method.is_l4_command(): self.command_id = self.channel.incoming_completion.sequence.next() #print "allocated: ", self.command_id, "to ", self.method.klass.name, "_", self.method.name def __len__(self): return len(self.frame.args) def _idx(self, idx): if idx < 0: idx += len(self) if idx < 0 or idx > len(self): raise IndexError(idx) return idx def __getitem__(self, idx): return self.frame.args[idx] def __getattr__(self, attr): fields = self.method.fields.byname if fields.has_key(attr): f = fields[attr] result = self[self.method.fields.index(f)] else: for r in self.method.responses: if attr == r.name: def respond(*args, **kwargs): batch=0 if kwargs.has_key("batchoffset"): batch=kwargs.pop("batchoffset") self.channel.respond(Method(r, r.arguments(*args, **kwargs)), batch, self.frame) result = respond break else: raise AttributeError(attr) return result STR = "%s %s content = %s" REPR = STR.replace("%s", "%r") def __str__(self): return Message.STR % (self.method, self.frame.args, self.content) def __repr__(self): return Message.REPR % (self.method, self.frame.args, self.content) def complete(self, cumulative=True): self.channel.incoming_completion.complete(mark=self.command_id, cumulative=cumulative) qpid-python-0.22/python/qpid/management.py0000644000175000017500000007114511430533717016751 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ############################################################################### ## This file is being obsoleted by qmf/console.py ############################################################################### """ Management API for Qpid """ import qpid import struct import socket from threading import Thread from datatypes import Message, RangedSet from time import time from cStringIO import StringIO from codec010 import StringCodec as Codec from threading import Lock, Condition class SequenceManager: """ Manage sequence numbers for asynchronous method calls """ def __init__ (self): self.lock = Lock () self.sequence = 0 self.pending = {} def reserve (self, data): """ Reserve a unique sequence number """ self.lock.acquire () result = self.sequence self.sequence = self.sequence + 1 self.pending[result] = data self.lock.release () return result def release (self, seq): """ Release a reserved sequence number """ data = None self.lock.acquire () if seq in self.pending: data = self.pending[seq] del self.pending[seq] self.lock.release () return data class mgmtObject (object): """ Generic object that holds the contents of a management object with its attributes set as object attributes. """ def __init__ (self, classKey, timestamps, row): self.classKey = classKey self.timestamps = timestamps for cell in row: setattr (self, cell[0], cell[1]) class objectId(object): """ Object that represents QMF object identifiers """ def __init__(self, codec, first=0, second=0): if codec: self.first = codec.read_uint64() self.second = codec.read_uint64() else: self.first = first self.second = second def __cmp__(self, other): if other == None: return 1 if self.first < other.first: return -1 if self.first > other.first: return 1 if self.second < other.second: return -1 if self.second > other.second: return 1 return 0 def index(self): return (self.first, self.second) def getFlags(self): return (self.first & 0xF000000000000000) >> 60 def getSequence(self): return (self.first & 0x0FFF000000000000) >> 48 def getBroker(self): return (self.first & 0x0000FFFFF0000000) >> 28 def getBank(self): return self.first & 0x000000000FFFFFFF def getObject(self): return self.second def isDurable(self): return self.getSequence() == 0 def encode(self, codec): codec.write_uint64(self.first) codec.write_uint64(self.second) class methodResult: """ Object that contains the result of a method call """ def __init__ (self, status, sText, args): self.status = status self.statusText = sText for arg in args: setattr (self, arg, args[arg]) class brokerInfo: """ Object that contains information about a broker and the session to it """ def __init__ (self, brokerId, sessionId): self.brokerId = brokerId self.sessionId = sessionId class managementChannel: """ This class represents a connection to an AMQP broker. """ def __init__ (self, ssn, topicCb, replyCb, exceptionCb, cbContext, _detlife=0): """ Given a channel on an established AMQP broker connection, this method opens a session and performs all of the declarations and bindings needed to participate in the management protocol. 
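Concretely, the code below declares exclusive, auto-delete topic and reply queues named after the session id, binds the reply queue to amq.direct, subscribes to both destinations with unlimited credit, and routes incoming deliveries to the topicCb/replyCb callbacks supplied by the caller.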
""" self.enabled = True self.ssn = ssn self.sessionId = ssn.name self.topicName = "mgmt-%s" % self.sessionId self.replyName = "repl-%s" % self.sessionId self.qpidChannel = ssn self.tcb = topicCb self.rcb = replyCb self.ecb = exceptionCb self.context = cbContext self.reqsOutstanding = 0 self.brokerInfo = None ssn.auto_sync = False ssn.queue_declare (queue=self.topicName, exclusive=True, auto_delete=True) ssn.queue_declare (queue=self.replyName, exclusive=True, auto_delete=True) ssn.exchange_bind (exchange="amq.direct", queue=self.replyName, binding_key=self.replyName) ssn.message_subscribe (queue=self.topicName, destination="tdest", accept_mode=ssn.accept_mode.none, acquire_mode=ssn.acquire_mode.pre_acquired) ssn.message_subscribe (queue=self.replyName, destination="rdest", accept_mode=ssn.accept_mode.none, acquire_mode=ssn.acquire_mode.pre_acquired) ssn.incoming ("tdest").listen (self.topicCb, self.exceptionCb) ssn.incoming ("rdest").listen (self.replyCb) ssn.message_set_flow_mode (destination="tdest", flow_mode=1) ssn.message_flow (destination="tdest", unit=0, value=0xFFFFFFFFL) ssn.message_flow (destination="tdest", unit=1, value=0xFFFFFFFFL) ssn.message_set_flow_mode (destination="rdest", flow_mode=1) ssn.message_flow (destination="rdest", unit=0, value=0xFFFFFFFFL) ssn.message_flow (destination="rdest", unit=1, value=0xFFFFFFFFL) def setBrokerInfo (self, data): self.brokerInfo = data def shutdown (self): self.enabled = False self.ssn.incoming("tdest").stop() self.ssn.incoming("rdest").stop() def topicCb (self, msg): """ Receive messages via the topic queue on this channel. """ if self.enabled: self.tcb (self, msg) self.ssn.receiver._completed.add(msg.id) self.ssn.channel.session_completed(self.ssn.receiver._completed) def replyCb (self, msg): """ Receive messages via the reply queue on this channel. """ if self.enabled: self.rcb (self, msg) self.ssn.receiver._completed.add(msg.id) self.ssn.channel.session_completed(self.ssn.receiver._completed) def exceptionCb (self, data): if self.ecb != None: self.ecb (self, data) def send (self, exchange, msg): if self.enabled: self.qpidChannel.message_transfer (destination=exchange, message=msg) def message (self, body, routing_key="broker"): dp = self.qpidChannel.delivery_properties() dp.routing_key = routing_key mp = self.qpidChannel.message_properties() mp.content_type = "application/octet-stream" mp.reply_to = self.qpidChannel.reply_to("amq.direct", self.replyName) return Message(dp, mp, body) class managementClient: """ This class provides an API for access to management data on the AMQP network. It implements the management protocol and manages the management schemas as advertised by the various management agents in the network. 
""" CTRL_BROKER_INFO = 1 CTRL_SCHEMA_LOADED = 2 CTRL_USER = 3 CTRL_HEARTBEAT = 4 SYNC_TIME = 10.0 #======================================================== # User API - interacts with the class's user #======================================================== def __init__ (self, unused=None, ctrlCb=None, configCb=None, instCb=None, methodCb=None, closeCb=None): self.ctrlCb = ctrlCb self.configCb = configCb self.instCb = instCb self.methodCb = methodCb self.closeCb = closeCb self.schemaCb = None self.eventCb = None self.channels = [] self.seqMgr = SequenceManager () self.schema = {} self.packages = {} self.cv = Condition () self.syncInFlight = False self.syncSequence = 0 self.syncResult = None def schemaListener (self, schemaCb): """ Optionally register a callback to receive details of the schema of managed objects in the network. """ self.schemaCb = schemaCb def eventListener (self, eventCb): """ Optionally register a callback to receive events from managed objects in the network. """ self.eventCb = eventCb def addChannel (self, channel, cbContext=None): """ Register a new channel. """ mch = managementChannel (channel, self.topicCb, self.replyCb, self.exceptCb, cbContext) self.channels.append (mch) self.incOutstanding (mch) codec = Codec () self.setHeader (codec, ord ('B')) msg = mch.message(codec.encoded) mch.send ("qpid.management", msg) return mch def removeChannel (self, mch): """ Remove a previously added channel from management. """ mch.shutdown () self.channels.remove (mch) def callMethod (self, channel, userSequence, objId, className, methodName, args=None): """ Invoke a method on a managed object. """ self.method (channel, userSequence, objId, className, methodName, args) def getObjects (self, channel, userSequence, className, bank=0): """ Request immediate content from broker """ codec = Codec () self.setHeader (codec, ord ('G'), userSequence) ft = {} ft["_class"] = className codec.write_map (ft) msg = channel.message(codec.encoded, routing_key="agent.1.%d" % bank) channel.send ("qpid.management", msg) def syncWaitForStable (self, channel): """ Synchronous (blocking) call to wait for schema stability on a channel """ self.cv.acquire () if channel.reqsOutstanding == 0: self.cv.release () return channel.brokerInfo self.syncInFlight = True starttime = time () while channel.reqsOutstanding != 0: self.cv.wait (self.SYNC_TIME) if time () - starttime > self.SYNC_TIME: self.cv.release () raise RuntimeError ("Timed out waiting for response on channel") self.cv.release () return channel.brokerInfo def syncCallMethod (self, channel, objId, className, methodName, args=None): """ Synchronous (blocking) method call """ self.cv.acquire () self.syncInFlight = True self.syncResult = None self.syncSequence = self.seqMgr.reserve ("sync") self.cv.release () self.callMethod (channel, self.syncSequence, objId, className, methodName, args) self.cv.acquire () starttime = time () while self.syncInFlight: self.cv.wait (self.SYNC_TIME) if time () - starttime > self.SYNC_TIME: self.cv.release () raise RuntimeError ("Timed out waiting for response on channel") result = self.syncResult self.cv.release () return result def syncGetObjects (self, channel, className, bank=0): """ Synchronous (blocking) get call """ self.cv.acquire () self.syncInFlight = True self.syncResult = [] self.syncSequence = self.seqMgr.reserve ("sync") self.cv.release () self.getObjects (channel, self.syncSequence, className, bank) self.cv.acquire () starttime = time () while self.syncInFlight: self.cv.wait (self.SYNC_TIME) if time () - 
starttime > self.SYNC_TIME: self.cv.release () raise RuntimeError ("Timed out waiting for response on channel") result = self.syncResult self.cv.release () return result #======================================================== # Channel API - interacts with registered channel objects #======================================================== def topicCb (self, ch, msg): """ Receive messages via the topic queue of a particular channel. """ codec = Codec (msg.body) while True: hdr = self.checkHeader (codec) if hdr == None: return if hdr[0] == 'p': self.handlePackageInd (ch, codec) elif hdr[0] == 'q': self.handleClassInd (ch, codec) elif hdr[0] == 'h': self.handleHeartbeat (ch, codec) elif hdr[0] == 'e': self.handleEvent (ch, codec) else: self.parse (ch, codec, hdr[0], hdr[1]) def replyCb (self, ch, msg): """ Receive messages via the reply queue of a particular channel. """ codec = Codec (msg.body) while True: hdr = self.checkHeader (codec) if hdr == None: return if hdr[0] == 'm': self.handleMethodReply (ch, codec, hdr[1]) elif hdr[0] == 'z': self.handleCommandComplete (ch, codec, hdr[1]) elif hdr[0] == 'b': self.handleBrokerResponse (ch, codec) elif hdr[0] == 'p': self.handlePackageInd (ch, codec) elif hdr[0] == 'q': self.handleClassInd (ch, codec) else: self.parse (ch, codec, hdr[0], hdr[1]) def exceptCb (self, ch, data): if self.closeCb != None: self.closeCb (ch.context, data) #======================================================== # Internal Functions #======================================================== def setHeader (self, codec, opcode, seq = 0): """ Compose the header of a management message. """ codec.write_uint8 (ord ('A')) codec.write_uint8 (ord ('M')) codec.write_uint8 (ord ('2')) codec.write_uint8 (opcode) codec.write_uint32 (seq) def checkHeader (self, codec): """ Check the header of a management message and extract the opcode and class. """ try: octet = chr (codec.read_uint8 ()) if octet != 'A': return None octet = chr (codec.read_uint8 ()) if octet != 'M': return None octet = chr (codec.read_uint8 ()) if octet != '2': return None opcode = chr (codec.read_uint8 ()) seq = codec.read_uint32 () return (opcode, seq) except: return None def encodeValue (self, codec, value, typecode): """ Encode, into the codec, a value based on its typecode. """ if typecode == 1: codec.write_uint8 (int (value)) elif typecode == 2: codec.write_uint16 (int (value)) elif typecode == 3: codec.write_uint32 (long (value)) elif typecode == 4: codec.write_uint64 (long (value)) elif typecode == 5: codec.write_uint8 (int (value)) elif typecode == 6: codec.write_str8 (value) elif typecode == 7: codec.write_str16 (value) elif typecode == 8: # ABSTIME codec.write_uint64 (long (value)) elif typecode == 9: # DELTATIME codec.write_uint64 (long (value)) elif typecode == 10: # REF value.encode(codec) elif typecode == 11: # BOOL codec.write_uint8 (int (value)) elif typecode == 12: # FLOAT codec.write_float (float (value)) elif typecode == 13: # DOUBLE codec.write_double (float (value)) elif typecode == 14: # UUID codec.write_uuid (value) elif typecode == 15: # FTABLE codec.write_map (value) elif typecode == 16: codec.write_int8 (int(value)) elif typecode == 17: codec.write_int16 (int(value)) elif typecode == 18: codec.write_int32 (int(value)) elif typecode == 19: codec.write_int64 (int(value)) else: raise ValueError ("Invalid type code: %d" % typecode) def decodeValue (self, codec, typecode): """ Decode, from the codec, a value based on its typecode. 
""" if typecode == 1: data = codec.read_uint8 () elif typecode == 2: data = codec.read_uint16 () elif typecode == 3: data = codec.read_uint32 () elif typecode == 4: data = codec.read_uint64 () elif typecode == 5: data = codec.read_uint8 () elif typecode == 6: data = codec.read_str8 () elif typecode == 7: data = codec.read_str16 () elif typecode == 8: # ABSTIME data = codec.read_uint64 () elif typecode == 9: # DELTATIME data = codec.read_uint64 () elif typecode == 10: # REF data = objectId(codec) elif typecode == 11: # BOOL data = codec.read_uint8 () elif typecode == 12: # FLOAT data = codec.read_float () elif typecode == 13: # DOUBLE data = codec.read_double () elif typecode == 14: # UUID data = codec.read_uuid () elif typecode == 15: # FTABLE data = codec.read_map () elif typecode == 16: data = codec.read_int8 () elif typecode == 17: data = codec.read_int16 () elif typecode == 18: data = codec.read_int32 () elif typecode == 19: data = codec.read_int64 () else: raise ValueError ("Invalid type code: %d" % typecode) return data def incOutstanding (self, ch): self.cv.acquire () ch.reqsOutstanding = ch.reqsOutstanding + 1 self.cv.release () def decOutstanding (self, ch): self.cv.acquire () ch.reqsOutstanding = ch.reqsOutstanding - 1 if ch.reqsOutstanding == 0 and self.syncInFlight: self.syncInFlight = False self.cv.notify () self.cv.release () if ch.reqsOutstanding == 0: if self.ctrlCb != None: self.ctrlCb (ch.context, self.CTRL_SCHEMA_LOADED, None) ch.ssn.exchange_bind (exchange="qpid.management", queue=ch.topicName, binding_key="console.#") ch.ssn.exchange_bind (exchange="qpid.management", queue=ch.topicName, binding_key="schema.#") def handleMethodReply (self, ch, codec, sequence): status = codec.read_uint32 () sText = codec.read_str16 () data = self.seqMgr.release (sequence) if data == None: return (userSequence, classId, methodName) = data args = {} context = self.seqMgr.release (userSequence) if status == 0: schemaClass = self.schema[classId] ms = schemaClass['M'] arglist = None for mname in ms: (mdesc, margs) = ms[mname] if mname == methodName: arglist = margs if arglist == None: return for arg in arglist: if arg[2].find("O") != -1: args[arg[0]] = self.decodeValue (codec, arg[1]) if context == "sync" and userSequence == self.syncSequence: self.cv.acquire () self.syncInFlight = False self.syncResult = methodResult (status, sText, args) self.cv.notify () self.cv.release () elif self.methodCb != None: self.methodCb (ch.context, userSequence, status, sText, args) def handleCommandComplete (self, ch, codec, seq): code = codec.read_uint32 () text = codec.read_str8 () data = (seq, code, text) context = self.seqMgr.release (seq) if context == "outstanding": self.decOutstanding (ch) elif context == "sync" and seq == self.syncSequence: self.cv.acquire () self.syncInFlight = False self.cv.notify () self.cv.release () elif self.ctrlCb != None: self.ctrlCb (ch.context, self.CTRL_USER, data) def handleBrokerResponse (self, ch, codec): uuid = codec.read_uuid () ch.brokerInfo = brokerInfo (uuid, ch.sessionId) if self.ctrlCb != None: self.ctrlCb (ch.context, self.CTRL_BROKER_INFO, ch.brokerInfo) # Send a package request sendCodec = Codec () seq = self.seqMgr.reserve ("outstanding") self.setHeader (sendCodec, ord ('P'), seq) smsg = ch.message(sendCodec.encoded) ch.send ("qpid.management", smsg) def handlePackageInd (self, ch, codec): pname = codec.read_str8 () if pname not in self.packages: self.packages[pname] = {} # Send a class request sendCodec = Codec () seq = self.seqMgr.reserve ("outstanding") 
self.setHeader (sendCodec, ord ('Q'), seq) self.incOutstanding (ch) sendCodec.write_str8 (pname) smsg = ch.message(sendCodec.encoded) ch.send ("qpid.management", smsg) def handleClassInd (self, ch, codec): kind = codec.read_uint8() if kind != 1: # This API doesn't handle new-style events return pname = codec.read_str8() cname = codec.read_str8() hash = codec.read_bin128() if pname not in self.packages: return if (cname, hash) not in self.packages[pname]: # Send a schema request sendCodec = Codec () seq = self.seqMgr.reserve ("outstanding") self.setHeader (sendCodec, ord ('S'), seq) self.incOutstanding (ch) sendCodec.write_str8 (pname) sendCodec.write_str8 (cname) sendCodec.write_bin128 (hash) smsg = ch.message(sendCodec.encoded) ch.send ("qpid.management", smsg) def handleHeartbeat (self, ch, codec): timestamp = codec.read_uint64() if self.ctrlCb != None: self.ctrlCb (ch.context, self.CTRL_HEARTBEAT, timestamp) def handleEvent (self, ch, codec): if self.eventCb == None: return timestamp = codec.read_uint64() objId = objectId(codec) packageName = codec.read_str8() className = codec.read_str8() hash = codec.read_bin128() name = codec.read_str8() classKey = (packageName, className, hash) if classKey not in self.schema: return; schemaClass = self.schema[classKey] row = [] es = schemaClass['E'] arglist = None for ename in es: (edesc, eargs) = es[ename] if ename == name: arglist = eargs if arglist == None: return for arg in arglist: row.append((arg[0], self.decodeValue(codec, arg[1]))) self.eventCb(ch.context, classKey, objId, name, row) def parseSchema (self, ch, codec): """ Parse a received schema-description message. """ self.decOutstanding (ch) kind = codec.read_uint8() if kind != 1: # This API doesn't handle new-style events return packageName = codec.read_str8 () className = codec.read_str8 () hash = codec.read_bin128 () hasSupertype = 0 #codec.read_uint8() configCount = codec.read_uint16 () instCount = codec.read_uint16 () methodCount = codec.read_uint16 () if hasSupertype != 0: supertypePackage = codec.read_str8() supertypeClass = codec.read_str8() supertypeHash = codec.read_bin128() if packageName not in self.packages: return if (className, hash) in self.packages[packageName]: return classKey = (packageName, className, hash) if classKey in self.schema: return configs = [] insts = [] methods = {} configs.append (("id", 4, "", "", 1, 1, None, None, None, None, None)) insts.append (("id", 4, None, None)) for idx in range (configCount): ft = codec.read_map () name = str (ft["name"]) type = ft["type"] access = ft["access"] index = ft["index"] optional = ft["optional"] unit = None min = None max = None maxlen = None desc = None for key, value in ft.items (): if key == "unit": unit = str (value) elif key == "min": min = value elif key == "max": max = value elif key == "maxlen": maxlen = value elif key == "desc": desc = str (value) config = (name, type, unit, desc, access, index, min, max, maxlen, optional) configs.append (config) for idx in range (instCount): ft = codec.read_map () name = str (ft["name"]) type = ft["type"] unit = None desc = None for key, value in ft.items (): if key == "unit": unit = str (value) elif key == "desc": desc = str (value) inst = (name, type, unit, desc) insts.append (inst) for idx in range (methodCount): ft = codec.read_map () mname = str (ft["name"]) argCount = ft["argCount"] if "desc" in ft: mdesc = str (ft["desc"]) else: mdesc = None args = [] for aidx in range (argCount): ft = codec.read_map () name = str (ft["name"]) type = ft["type"] dir = str 
(ft["dir"].upper ()) unit = None min = None max = None maxlen = None desc = None default = None for key, value in ft.items (): if key == "unit": unit = str (value) elif key == "min": min = value elif key == "max": max = value elif key == "maxlen": maxlen = value elif key == "desc": desc = str (value) elif key == "default": default = str (value) arg = (name, type, dir, unit, desc, min, max, maxlen, default) args.append (arg) methods[mname] = (mdesc, args) schemaClass = {} schemaClass['C'] = configs schemaClass['I'] = insts schemaClass['M'] = methods self.schema[classKey] = schemaClass if self.schemaCb != None: self.schemaCb (ch.context, classKey, configs, insts, methods, {}) def parsePresenceMasks(self, codec, schemaClass): """ Generate a list of not-present properties """ excludeList = [] bit = 0 for element in schemaClass['C'][1:]: if element[9] == 1: if bit == 0: mask = codec.read_uint8() bit = 1 if (mask & bit) == 0: excludeList.append(element[0]) bit = bit * 2 if bit == 256: bit = 0 return excludeList def parseContent (self, ch, cls, codec, seq=0): """ Parse a received content message. """ if (cls == 'C' or (cls == 'B' and seq == 0)) and self.configCb == None: return if cls == 'I' and self.instCb == None: return packageName = codec.read_str8 () className = codec.read_str8 () hash = codec.read_bin128 () classKey = (packageName, className, hash) if classKey not in self.schema: return row = [] timestamps = [] timestamps.append (codec.read_uint64 ()) # Current Time timestamps.append (codec.read_uint64 ()) # Create Time timestamps.append (codec.read_uint64 ()) # Delete Time objId = objectId(codec) schemaClass = self.schema[classKey] if cls == 'C' or cls == 'B': notPresent = self.parsePresenceMasks(codec, schemaClass) if cls == 'C' or cls == 'B': row.append(("id", objId)) for element in schemaClass['C'][1:]: tc = element[1] name = element[0] if name in notPresent: row.append((name, None)) else: data = self.decodeValue(codec, tc) row.append((name, data)) if cls == 'I' or cls == 'B': if cls == 'I': row.append(("id", objId)) for element in schemaClass['I'][1:]: tc = element[1] name = element[0] data = self.decodeValue (codec, tc) row.append ((name, data)) if cls == 'C' or (cls == 'B' and seq != self.syncSequence): self.configCb (ch.context, classKey, row, timestamps) elif cls == 'B' and seq == self.syncSequence: if timestamps[2] == 0: obj = mgmtObject (classKey, timestamps, row) self.syncResult.append (obj) elif cls == 'I': self.instCb (ch.context, classKey, row, timestamps) def parse (self, ch, codec, opcode, seq): """ Parse a message received from the topic queue. 
""" if opcode == 's': self.parseSchema (ch, codec) elif opcode == 'c': self.parseContent (ch, 'C', codec) elif opcode == 'i': self.parseContent (ch, 'I', codec) elif opcode == 'g': self.parseContent (ch, 'B', codec, seq) else: raise ValueError ("Unknown opcode: %c" % opcode); def method (self, channel, userSequence, objId, classId, methodName, args): """ Invoke a method on an object """ codec = Codec () sequence = self.seqMgr.reserve ((userSequence, classId, methodName)) self.setHeader (codec, ord ('M'), sequence) objId.encode(codec) codec.write_str8 (classId[0]) codec.write_str8 (classId[1]) codec.write_bin128 (classId[2]) codec.write_str8 (methodName) bank = "%d.%d" % (objId.getBroker(), objId.getBank()) # Encode args according to schema if classId not in self.schema: self.seqMgr.release (sequence) raise ValueError ("Unknown class name: %s" % classId) schemaClass = self.schema[classId] ms = schemaClass['M'] arglist = None for mname in ms: (mdesc, margs) = ms[mname] if mname == methodName: arglist = margs if arglist == None: self.seqMgr.release (sequence) raise ValueError ("Unknown method name: %s" % methodName) for arg in arglist: if arg[2].find("I") != -1: value = arg[8] # default if arg[0] in args: value = args[arg[0]] if value == None: self.seqMgr.release (sequence) raise ValueError ("Missing non-defaulted argument: %s" % arg[0]) self.encodeValue (codec, value, arg[1]) packageName = classId[0] className = classId[1] msg = channel.message(codec.encoded, "agent." + bank) channel.send ("qpid.management", msg) qpid-python-0.22/python/qpid/delegates.py0000644000175000017500000001555412046451714016574 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import os, connection, session from util import notify, get_client_properties_with_defaults from datatypes import RangedSet from exceptions import VersionError, Closed from logging import getLogger from ops import Control import sys from qpid import sasl log = getLogger("qpid.io.ctl") class Delegate: def __init__(self, connection, delegate=session.client): self.connection = connection self.delegate = delegate def received(self, op): ssn = self.connection.attached.get(op.channel) if ssn is None: ch = connection.Channel(self.connection, op.channel) else: ch = ssn.channel if isinstance(op, Control): log.debug("RECV %s", op) getattr(self, op.NAME)(ch, op) elif ssn is None: ch.session_detached() else: ssn.received(op) def connection_close(self, ch, close): self.connection.close_code = (close.reply_code, close.reply_text) ch.connection_close_ok() raise Closed(close.reply_text) def connection_close_ok(self, ch, close_ok): self.connection.opened = False self.connection.closed = True notify(self.connection.condition) def connection_heartbeat(self, ch, hrt): pass def session_attach(self, ch, a): try: self.connection.attach(a.name, ch, self.delegate, a.force) ch.session_attached(a.name) except connection.ChannelBusy: ch.session_detached(a.name) except connection.SessionBusy: ch.session_detached(a.name) def session_attached(self, ch, a): notify(ch.session.condition) def session_detach(self, ch, d): #send back the confirmation of detachment before removing the #channel from the attached set; this avoids needing to hold the #connection lock during the sending of this control and ensures #that if the channel is immediately reused for a new session the #attach request will follow the detached notification. ch.session_detached(d.name) ssn = self.connection.detach(d.name, ch) def session_detached(self, ch, d): self.connection.detach(d.name, ch) def session_request_timeout(self, ch, rt): ch.session_timeout(rt.timeout); def session_command_point(self, ch, cp): ssn = ch.session ssn.receiver.next_id = cp.command_id ssn.receiver.next_offset = cp.command_offset def session_completed(self, ch, cmp): ch.session.sender.completed(cmp.commands) if cmp.timely_reply: ch.session_known_completed(cmp.commands) notify(ch.session.condition) def session_known_completed(self, ch, kn_cmp): ch.session.receiver.known_completed(kn_cmp.commands) def session_flush(self, ch, f): rcv = ch.session.receiver if f.expected: if rcv.next_id == None: exp = None else: exp = RangedSet(rcv.next_id) ch.session_expected(exp) if f.confirmed: ch.session_confirmed(rcv._completed) if f.completed: ch.session_completed(rcv._completed) class Server(Delegate): def start(self): self.connection.read_header() # XXX self.connection.write_header(0, 10) connection.Channel(self.connection, 0).connection_start(mechanisms=["ANONYMOUS"]) def connection_start_ok(self, ch, start_ok): ch.connection_tune(channel_max=65535) def connection_tune_ok(self, ch, tune_ok): pass def connection_open(self, ch, open): self.connection.opened = True ch.connection_open_ok() notify(self.connection.condition) class Client(Delegate): def __init__(self, connection, username=None, password=None, mechanism=None, heartbeat=None, **kwargs): Delegate.__init__(self, connection) provided_client_properties = kwargs.get("client_properties") self.client_properties=get_client_properties_with_defaults(provided_client_properties) ## ## self.acceptableMechanisms is the list of SASL mechanisms that the client is willing to ## use. If it's None, then any mechanism is acceptable. 
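## For example (illustrative value, not from the original source): passing
## mechanism="PLAIN ANONYMOUS" below yields
## self.acceptableMechanisms == ["PLAIN", "ANONYMOUS"], and connection_start
## will then offer only those mechanisms to the SASL layer.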
## self.acceptableMechanisms = None if mechanism: self.acceptableMechanisms = mechanism.split(" ") self.heartbeat = heartbeat self.username = username self.password = password self.sasl = sasl.Client() if username and len(username) > 0: self.sasl.setAttr("username", str(username)) if password and len(password) > 0: self.sasl.setAttr("password", str(password)) self.sasl.setAttr("service", str(kwargs.get("service", "qpidd"))) if "host" in kwargs: self.sasl.setAttr("host", str(kwargs["host"])) if "min_ssf" in kwargs: self.sasl.setAttr("minssf", kwargs["min_ssf"]) if "max_ssf" in kwargs: self.sasl.setAttr("maxssf", kwargs["max_ssf"]) self.sasl.init() def start(self): # XXX cli_major = 0 cli_minor = 10 self.connection.write_header(cli_major, cli_minor) magic, _, _, major, minor = self.connection.read_header() if not (magic == "AMQP" and major == cli_major and minor == cli_minor): raise VersionError("client: %s-%s, server: %s-%s" % (cli_major, cli_minor, major, minor)) def connection_start(self, ch, start): mech_list = "" for mech in start.mechanisms: if (not self.acceptableMechanisms) or mech in self.acceptableMechanisms: mech_list += str(mech) + " " mech = None initial = None try: mech, initial = self.sasl.start(mech_list) except Exception, e: raise Closed(str(e)) ch.connection_start_ok(client_properties=self.client_properties, mechanism=mech, response=initial) def connection_secure(self, ch, secure): resp = None try: resp = self.sasl.step(secure.challenge) except Exception, e: raise Closed(str(e)) ch.connection_secure_ok(response=resp) def connection_tune(self, ch, tune): ch.connection_tune_ok(heartbeat=self.heartbeat) ch.connection_open() self.connection.user_id = self.sasl.auth_username() self.connection.security_layer_tx = self.sasl def connection_open_ok(self, ch, open_ok): self.connection.security_layer_rx = self.sasl self.connection.opened = True notify(self.connection.condition) def connection_heartbeat(self, ch, hrt): ch.connection_heartbeat() qpid-python-0.22/python/qpid/packer.py0000644000175000017500000000214210763325375016100 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import struct class Packer: def read(self, n): abstract def write(self, s): abstract def unpack(self, fmt): values = struct.unpack(fmt, self.read(struct.calcsize(fmt))) if len(values) == 1: return values[0] else: return values def pack(self, fmt, *args): self.write(struct.pack(fmt, *args)) qpid-python-0.22/python/qpid/testlib.py0000644000175000017500000002220712046451714016276 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. 
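# Illustrative sketch (editorial addition): Packer in packer.py above leaves
# read() and write() abstract and layers struct-based pack()/unpack() on top
# of them.  A minimal concrete subclass backed by an in-memory buffer might
# look like this; the class name and buffer handling are hypothetical.
from cStringIO import StringIO
from qpid.packer import Packer

class BufferPacker(Packer):
  def __init__(self):
    self.buf = StringIO()
  def read(self, n):
    return self.buf.read(n)
  def write(self, s):
    self.buf.write(s)

# p = BufferPacker()
# p.pack("!HB", 10, 1)              # writes a short followed by an octet
# p.buf.seek(0)
# assert p.unpack("!HB") == (10, 1) # reads them back in one call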
The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Support library for qpid python tests. # import unittest, traceback, socket import qpid.client, qmf.console import Queue from qpid.content import Content from qpid.message import Message from qpid.harness import Skipped from qpid.exceptions import VersionError import qpid.messaging from qpidtoollibs import BrokerAgent class TestBase(unittest.TestCase): """Base class for Qpid test cases. self.client is automatically connected with channel 1 open before the test methods are run. Deletes queues and exchanges after. Tests call self.queue_declare(channel, ...) and self.exchange_declare(chanel, ...) which are wrappers for the Channel functions that note resources to clean up later. """ def configure(self, config): self.config = config def setUp(self): self.queues = [] self.exchanges = [] self.client = self.connect() self.channel = self.client.channel(1) self.version = (self.client.spec.major, self.client.spec.minor) if self.version == (8, 0) or self.version == (0, 9): self.channel.channel_open() else: self.channel.session_open() def tearDown(self): try: for ch, q in self.queues: ch.queue_delete(queue=q) for ch, ex in self.exchanges: ch.exchange_delete(exchange=ex) except: print "Error on tearDown:" print traceback.print_exc() if not self.client.closed: self.client.channel(0).connection_close(reply_code=200) else: self.client.close() def connect(self, host=None, port=None, user=None, password=None, tune_params=None, client_properties=None): """Create a new connction, return the Client object""" host = host or self.config.broker.host port = port or self.config.broker.port or 5672 user = user or "guest" password = password or "guest" client = qpid.client.Client(host, port) try: if client.spec.major == 8 and client.spec.minor == 0: client.start({"LOGIN": user, "PASSWORD": password}, tune_params=tune_params, client_properties=client_properties) else: client.start("\x00" + user + "\x00" + password, mechanism="PLAIN", tune_params=tune_params, client_properties=client_properties) except qpid.client.Closed, e: if isinstance(e.args[0], VersionError): raise Skipped(e.args[0]) else: raise e except socket.error, e: raise Skipped(e) return client def queue_declare(self, channel=None, *args, **keys): channel = channel or self.channel reply = channel.queue_declare(*args, **keys) self.queues.append((channel, keys["queue"])) return reply def exchange_declare(self, channel=None, ticket=0, exchange='', type='', passive=False, durable=False, auto_delete=False, arguments={}): channel = channel or self.channel reply = channel.exchange_declare(ticket=ticket, exchange=exchange, type=type, passive=passive,durable=durable, auto_delete=auto_delete, arguments=arguments) self.exchanges.append((channel,exchange)) return reply def uniqueString(self): """Generate a unique string, unique for this TestBase instance""" if not "uniqueCounter" in dir(self): self.uniqueCounter = 1; return "Test Message " + str(self.uniqueCounter) def consume(self, queueName): """Consume 
from named queue returns the Queue object.""" reply = self.channel.basic_consume(queue=queueName, no_ack=True) return self.client.queue(reply.consumer_tag) def subscribe(self, channel=None, **keys): channel = channel or self.channel consumer_tag = keys["destination"] channel.message_subscribe(**keys) channel.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFFL) channel.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFFL) def assertEmpty(self, queue): """Assert that the queue is empty""" try: queue.get(timeout=1) self.fail("Queue is not empty.") except Queue.Empty: None # Ignore def assertPublishGet(self, queue, exchange="", routing_key="", properties=None): """ Publish to exchange and assert queue.get() returns the same message. """ body = self.uniqueString() self.channel.basic_publish( exchange=exchange, content=Content(body, properties=properties), routing_key=routing_key) msg = queue.get(timeout=1) self.assertEqual(body, msg.content.body) if (properties): self.assertEqual(properties, msg.content.properties) def assertPublishConsume(self, queue="", exchange="", routing_key="", properties=None): """ Publish a message and consume it, assert it comes back intact. Return the Queue object used to consume. """ self.assertPublishGet(self.consume(queue), exchange, routing_key, properties) def assertChannelException(self, expectedCode, message): if self.version == (8, 0) or self.version == (0, 9): if not isinstance(message, Message): self.fail("expected channel_close method, got %s" % (message)) self.assertEqual("channel", message.method.klass.name) self.assertEqual("close", message.method.name) else: if not isinstance(message, Message): self.fail("expected session_closed method, got %s" % (message)) self.assertEqual("session", message.method.klass.name) self.assertEqual("closed", message.method.name) self.assertEqual(expectedCode, message.reply_code) def assertConnectionException(self, expectedCode, message): if not isinstance(message, Message): self.fail("expected connection_close method, got %s" % (message)) self.assertEqual("connection", message.method.klass.name) self.assertEqual("close", message.method.name) self.assertEqual(expectedCode, message.reply_code) #0-10 support from qpid.connection import Connection from qpid.util import connect, ssl, URL class TestBase010(unittest.TestCase): """ Base class for Qpid test cases. using the final 0-10 spec """ def configure(self, config): self.config = config self.broker = config.broker self.defines = self.config.defines def setUp(self): self.conn = self.connect() self.session = self.conn.session("test-session", timeout=10) self.qmf = None self.test_queue_name = self.id() def startQmf(self, handler=None): self.qmf = qmf.console.Session(handler) self.qmf_broker = self.qmf.addBroker(str(self.broker)) def startBrokerAccess(self): """ New-style management access to the broker. Can be used in lieu of startQmf. 
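# Illustrative sketch (editorial addition): a hypothetical test case built on
# TestBase above, assuming a broker reachable through the harness Config.
# queue_declare() records the queue for cleanup in tearDown(), and
# assertPublishConsume() publishes a unique body and checks it comes back
# from the named queue (here via the default exchange, so the routing key is
# the queue name).  The class and queue names are made up for illustration.
class ExampleDefaultExchangeTest(TestBase):
  def test_publish_and_consume(self):
    self.queue_declare(queue="example-queue")
    self.assertPublishConsume(queue="example-queue", routing_key="example-queue")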
""" if 'broker_conn' not in self.__dict__: self.broker_conn = qpid.messaging.Connection(str(self.broker)) self.broker_conn.open() self.broker_access = BrokerAgent(self.broker_conn) def connect(self, host=None, port=None): url = self.broker if url.scheme == URL.AMQPS: default_port = 5671 else: default_port = 5672 try: sock = connect(host or url.host, port or url.port or default_port) except socket.error, e: raise Skipped(e) if url.scheme == URL.AMQPS: sock = ssl(sock) conn = Connection(sock, username=url.user or "guest", password=url.password or "guest") try: conn.start(timeout=10) except VersionError, e: raise Skipped(e) return conn def tearDown(self): if not self.session.error(): self.session.close(timeout=10) self.conn.close(timeout=10) if self.qmf: self.qmf.delBroker(self.qmf_broker) def subscribe(self, session=None, **keys): session = session or self.session consumer_tag = keys["destination"] session.message_subscribe(**keys) session.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFFL) session.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFFL) qpid-python-0.22/python/qpid/managementdata.py0000644000175000017500000006055411301056212017570 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ############################################################################### ## This file is being obsoleted by qmf/console.py ############################################################################### import qpid import re import socket import struct import os import platform import locale from qpid.connection import Timeout from qpid.management import managementChannel, managementClient from threading import Lock from disp import Display from shlex import split from qpid.connection import Connection from qpid.util import connect class Broker: def __init__ (self, text): rex = re.compile(r""" # [ [ / ] @] [ : ] ^ (?: ([^/]*) (?: / ([^@]*) )? @)? ([^:]+) (?: :([0-9]+))?$""", re.X) match = rex.match(text) if not match: raise ValueError("'%s' is not a valid broker url" % (text)) user, password, host, port = match.groups() if port: self.port = int(port) else: self.port = 5672 for addr in socket.getaddrinfo(host, self.port): if addr[1] == socket.AF_INET: self.host = addr[4][0] self.username = user or "guest" self.password = password or "guest" def name (self): return self.host + ":" + str (self.port) class ManagementData: # # Data Structure: # # Please note that this data structure holds only the most recent # configuration and instrumentation data for each object. It does # not hold the detailed historical data that is sent from the broker. # The only historical data it keeps are the high and low watermarks # for hi-lo statistics. 
# # tables :== {class-key} # {} # (timestamp, config-record, inst-record) # class-key :== (, , ) # timestamp :== (, , ) # config-record :== [element] # inst-record :== [element] # element :== (, ) # def registerObjId (self, objId): if not objId.index() in self.idBackMap: self.idBackMap[objId.index()] = self.nextId self.idMap[self.nextId] = objId self.nextId += 1 def displayObjId (self, objIdIndex): if objIdIndex in self.idBackMap: return self.idBackMap[objIdIndex] else: return 0 def rawObjId (self, displayId): if displayId in self.idMap: return self.idMap[displayId] else: return None def displayClassName (self, cls): (packageName, className, hash) = cls rev = self.schema[cls][4] if rev == 0: suffix = "" else: suffix = ".%d" % rev return packageName + ":" + className + suffix def dataHandler (self, context, className, list, timestamps): """ Callback for configuration and instrumentation data updates """ self.lock.acquire () try: # If this class has not been seen before, create an empty dictionary to # hold objects of this class if className not in self.tables: self.tables[className] = {} # Register the ID so a more friendly presentation can be displayed objId = list[0][1] oidx = objId.index() self.registerObjId (objId) # If this object hasn't been seen before, create a new object record with # the timestamps and empty lists for configuration and instrumentation data. if oidx not in self.tables[className]: self.tables[className][oidx] = (timestamps, [], []) (unused, oldConf, oldInst) = self.tables[className][oidx] # For config updates, simply replace old config list with the new one. if context == 0: #config self.tables[className][oidx] = (timestamps, list, oldInst) # For instrumentation updates, carry the minimum and maximum values for # "hi-lo" stats forward. 
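# Illustrative sketch (editorial addition): the instrumentation branch of
# dataHandler() below carries "hi-lo" watermarks forward -- statistics whose
# names end in "High" keep their maximum across samples and ones ending in
# "Low" keep their minimum.  The helper here mirrors that merge on plain
# (key, value) lists; the function name is hypothetical.
def merge_hi_lo(old_inst, new_inst):
  if not old_inst:
    return list(new_inst)
  merged = []
  for idx in range(len(new_inst)):
    key, value = new_inst[idx]
    if key.endswith("High") and old_inst[idx][1] > value:
      value = old_inst[idx][1]
    if key.endswith("Low") and old_inst[idx][1] < value:
      value = old_inst[idx][1]
    merged.append((key, value))
  return merged

# merge_hi_lo([("depthHigh", 9), ("depthLow", 2)],
#             [("depthHigh", 5), ("depthLow", 4)])
#   -> [("depthHigh", 9), ("depthLow", 2)]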
elif context == 1: #inst if len (oldInst) == 0: newInst = list else: newInst = [] for idx in range (len (list)): (key, value) = list[idx] if key.find ("High") == len (key) - 4: if oldInst[idx][1] > value: value = oldInst[idx][1] if key.find ("Low") == len (key) - 3: if oldInst[idx][1] < value: value = oldInst[idx][1] newInst.append ((key, value)) self.tables[className][oidx] = (timestamps, oldConf, newInst) finally: self.lock.release () def ctrlHandler (self, context, op, data): if op == self.mclient.CTRL_BROKER_INFO: pass elif op == self.mclient.CTRL_HEARTBEAT: pass def configHandler (self, context, className, list, timestamps): self.dataHandler (0, className, list, timestamps); def instHandler (self, context, className, list, timestamps): self.dataHandler (1, className, list, timestamps); def methodReply (self, broker, sequence, status, sText, args): """ Callback for method-reply messages """ self.lock.acquire () try: line = "Call Result: " + self.methodsPending[sequence] + \ " " + str (status) + " (" + sText + ")" print line, args del self.methodsPending[sequence] finally: self.lock.release () def closeHandler (self, context, reason): if self.operational: print "Connection to broker lost:", reason self.operational = False if self.cli != None: self.cli.setPromptMessage ("Broker Disconnected") def schemaHandler (self, context, classKey, configs, insts, methods, events): """ Callback for schema updates """ if classKey not in self.schema: schemaRev = 0 for key in self.schema: if classKey[0] == key[0] and classKey[1] == key[1]: schemaRev += 1 self.schema[classKey] = (configs, insts, methods, events, schemaRev) def setCli (self, cliobj): self.cli = cliobj def __init__ (self, disp, host, username="guest", password="guest"): self.lock = Lock () self.tables = {} self.schema = {} self.bootSequence = 0 self.operational = False self.disp = disp self.cli = None self.lastUnit = None self.methodSeq = 1 self.methodsPending = {} self.sessionId = "%s.%d" % (platform.uname()[1], os.getpid()) self.broker = Broker (host) sock = connect (self.broker.host, self.broker.port) oldTimeout = sock.gettimeout() sock.settimeout(10) self.conn = Connection (sock, username=self.broker.username, password=self.broker.password) def aborted(): raise Timeout("Waiting for connection to be established with broker") oldAborted = self.conn.aborted self.conn.aborted = aborted self.conn.start () sock.settimeout(oldTimeout) self.conn.aborted = oldAborted self.mclient = managementClient ("unused", self.ctrlHandler, self.configHandler, self.instHandler, self.methodReply, self.closeHandler) self.mclient.schemaListener (self.schemaHandler) self.mch = self.mclient.addChannel (self.conn.session(self.sessionId)) self.operational = True self.idMap = {} self.idBackMap = {} self.nextId = 101 def close (self): pass def refName (self, oid): if oid == None: return "NULL" return str (self.displayObjId (oid.index())) def valueDisplay (self, classKey, key, value): if value == None: return "" for kind in range (2): schema = self.schema[classKey][kind] for item in schema: if item[0] == key: typecode = item[1] unit = item[2] if (typecode >= 1 and typecode <= 5) or typecode == 12 or typecode == 13 or \ (typecode >= 16 and typecode <= 19): if unit == None or unit == self.lastUnit: return str (value) else: self.lastUnit = unit suffix = "" if value != 1: suffix = "s" return str (value) + " " + unit + suffix elif typecode == 6 or typecode == 7: # strings return value elif typecode == 8: if value == 0: return "--" return self.disp.timestamp (value) elif 
typecode == 9: return str (value) elif typecode == 10: return self.refName (value) elif typecode == 11: if value == 0: return "False" else: return "True" elif typecode == 14: return str (value) elif typecode == 15: return str (value) return "*type-error*" def getObjIndex (self, classKey, config): """ Concatenate the values from index columns to form a unique object name """ result = "" schemaConfig = self.schema[classKey][0] for item in schemaConfig: if item[5] == 1 and item[0] != "id": if result != "": result = result + "." for key,val in config: if key == item[0]: result = result + self.valueDisplay (classKey, key, val) return result def getClassKey (self, className): delimPos = className.find(":") if delimPos == -1: schemaRev = 0 delim = className.find(".") if delim != -1: schemaRev = int(className[delim + 1:]) name = className[0:delim] else: name = className for key in self.schema: if key[1] == name and self.schema[key][4] == schemaRev: return key else: package = className[0:delimPos] name = className[delimPos + 1:] schemaRev = 0 delim = name.find(".") if delim != -1: schemaRev = int(name[delim + 1:]) name = name[0:delim] for key in self.schema: if key[0] == package and key[1] == name: if self.schema[key][4] == schemaRev: return key return None def classCompletions (self, prefix): """ Provide a list of candidate class names for command completion """ self.lock.acquire () complist = [] try: for name in self.tables: if name.find (prefix) == 0: complist.append (name) finally: self.lock.release () return complist def typeName (self, typecode): """ Convert type-codes to printable strings """ if typecode == 1: return "uint8" elif typecode == 2: return "uint16" elif typecode == 3: return "uint32" elif typecode == 4: return "uint64" elif typecode == 5: return "bool" elif typecode == 6: return "short-string" elif typecode == 7: return "long-string" elif typecode == 8: return "abs-time" elif typecode == 9: return "delta-time" elif typecode == 10: return "reference" elif typecode == 11: return "boolean" elif typecode == 12: return "float" elif typecode == 13: return "double" elif typecode == 14: return "uuid" elif typecode == 15: return "field-table" elif typecode == 16: return "int8" elif typecode == 17: return "int16" elif typecode == 18: return "int32" elif typecode == 19: return "int64" elif typecode == 20: return "object" elif typecode == 21: return "list" elif typecode == 22: return "array" else: raise ValueError ("Invalid type code: %d" % typecode) def accessName (self, code): """ Convert element access codes to printable strings """ if code == 1: return "ReadCreate" elif code == 2: return "ReadWrite" elif code == 3: return "ReadOnly" else: raise ValueError ("Invalid access code: %d" %code) def notNone (self, text): if text == None: return "" else: return text def isOid (self, id): for char in str (id): if not char.isdigit () and not char == '-': return False return True def listOfIds (self, classKey, tokens): """ Generate a tuple of object ids for a classname based on command tokens. 
""" list = [] if len(tokens) == 0 or tokens[0] == "all": for id in self.tables[classKey]: list.append (self.displayObjId (id)) elif tokens[0] == "active": for id in self.tables[classKey]: if self.tables[classKey][id][0][2] == 0: list.append (self.displayObjId (id)) else: for token in tokens: if self.isOid (token): if token.find ("-") != -1: ids = token.split("-", 2) for id in range (int (ids[0]), int (ids[1]) + 1): if self.getClassForId (self.rawObjId (long (id))) == classKey: list.append (id) else: list.append (int(token)) list.sort () result = () for item in list: result = result + (item,) return result def listClasses (self): """ Generate a display of the list of classes """ self.lock.acquire () try: rows = [] sorted = self.tables.keys () sorted.sort () for name in sorted: active = 0 deleted = 0 for record in self.tables[name]: isdel = False ts = self.tables[name][record][0] if ts[2] > 0: isdel = True if isdel: deleted = deleted + 1 else: active = active + 1 rows.append ((self.displayClassName (name), active, deleted)) if len (rows) != 0: self.disp.table ("Management Object Types:", ("ObjectType", "Active", "Deleted"), rows) else: print "Waiting for next periodic update" finally: self.lock.release () def listObjects (self, tokens): """ Generate a display of a list of objects in a class """ if len(tokens) == 0: print "Error - No class name provided" return self.lock.acquire () try: classKey = self.getClassKey (tokens[0]) if classKey == None: print ("Object type %s not known" % tokens[0]) else: rows = [] if classKey in self.tables: ids = self.listOfIds(classKey, tokens[1:]) for objId in ids: (ts, config, inst) = self.tables[classKey][self.rawObjId(objId).index()] createTime = self.disp.timestamp (ts[1]) destroyTime = "-" if ts[2] > 0: destroyTime = self.disp.timestamp (ts[2]) objIndex = self.getObjIndex (classKey, config) row = (objId, createTime, destroyTime, objIndex) rows.append (row) self.disp.table ("Objects of type %s" % self.displayClassName(classKey), ("ID", "Created", "Destroyed", "Index"), rows) finally: self.lock.release () def showObjects (self, tokens): """ Generate a display of object data for a particular class """ self.lock.acquire () try: self.lastUnit = None if self.isOid (tokens[0]): if tokens[0].find ("-") != -1: rootId = int (tokens[0][0:tokens[0].find ("-")]) else: rootId = int (tokens[0]) classKey = self.getClassForId (self.rawObjId (rootId)) remaining = tokens if classKey == None: print "Id not known: %d" % int (tokens[0]) raise ValueError () else: classKey = self.getClassKey (tokens[0]) remaining = tokens[1:] if classKey not in self.tables: print "Class not known: %s" % tokens[0] raise ValueError () userIds = self.listOfIds (classKey, remaining) if len (userIds) == 0: print "No object IDs supplied" raise ValueError () ids = [] for id in userIds: if self.getClassForId (self.rawObjId (long (id))) == classKey: ids.append (self.rawObjId (long (id))) rows = [] timestamp = None config = self.tables[classKey][ids[0].index()][1] for eIdx in range (len (config)): key = config[eIdx][0] if key != "id": row = ("property", key) for id in ids: if timestamp == None or \ timestamp < self.tables[classKey][id.index()][0][0]: timestamp = self.tables[classKey][id.index()][0][0] (key, value) = self.tables[classKey][id.index()][1][eIdx] row = row + (self.valueDisplay (classKey, key, value),) rows.append (row) inst = self.tables[classKey][ids[0].index()][2] for eIdx in range (len (inst)): key = inst[eIdx][0] if key != "id": row = ("statistic", key) for id in ids: (key, value) = 
self.tables[classKey][id.index()][2][eIdx] row = row + (self.valueDisplay (classKey, key, value),) rows.append (row) titleRow = ("Type", "Element") for id in ids: titleRow = titleRow + (self.refName(id),) caption = "Object of type %s:" % self.displayClassName(classKey) if timestamp != None: caption = caption + " (last sample time: " + self.disp.timestamp (timestamp) + ")" self.disp.table (caption, titleRow, rows) except: pass self.lock.release () def schemaSummary (self): """ Generate a display of the list of classes in the schema """ self.lock.acquire () try: rows = [] sorted = self.schema.keys () sorted.sort () for classKey in sorted: tuple = self.schema[classKey] row = (self.displayClassName(classKey), len (tuple[0]), len (tuple[1]), len (tuple[2])) rows.append (row) self.disp.table ("Classes in Schema:", ("Class", "Properties", "Statistics", "Methods"), rows) finally: self.lock.release () def schemaTable (self, className): """ Generate a display of details of the schema of a particular class """ self.lock.acquire () try: classKey = self.getClassKey (className) if classKey == None: print ("Class name %s not known" % className) raise ValueError () rows = [] schemaRev = self.schema[classKey][4] for config in self.schema[classKey][0]: name = config[0] if name != "id": typename = self.typeName(config[1]) unit = self.notNone (config[2]) desc = self.notNone (config[3]) access = self.accessName (config[4]) extra = "" if config[5] == 1: extra += "index " if config[6] != None: extra += "Min: " + str(config[6]) + " " if config[7] != None: extra += "Max: " + str(config[7]) + " " if config[8] != None: extra += "MaxLen: " + str(config[8]) + " " if config[9] == 1: extra += "optional " rows.append ((name, typename, unit, access, extra, desc)) for config in self.schema[classKey][1]: name = config[0] if name != "id": typename = self.typeName(config[1]) unit = self.notNone (config[2]) desc = self.notNone (config[3]) rows.append ((name, typename, unit, "", "", desc)) titles = ("Element", "Type", "Unit", "Access", "Notes", "Description") self.disp.table ("Schema for class '%s':" % self.displayClassName(classKey), titles, rows) for mname in self.schema[classKey][2]: (mdesc, args) = self.schema[classKey][2][mname] caption = "\nMethod '%s' %s" % (mname, self.notNone (mdesc)) rows = [] for arg in args: name = arg[0] typename = self.typeName (arg[1]) dir = arg[2] unit = self.notNone (arg[3]) desc = self.notNone (arg[4]) extra = "" if arg[5] != None: extra = extra + "Min: " + str (arg[5]) if arg[6] != None: extra = extra + "Max: " + str (arg[6]) if arg[7] != None: extra = extra + "MaxLen: " + str (arg[7]) if arg[8] != None: extra = extra + "Default: " + str (arg[8]) rows.append ((name, typename, dir, unit, extra, desc)) titles = ("Argument", "Type", "Direction", "Unit", "Notes", "Description") self.disp.table (caption, titles, rows) except Exception,e: pass self.lock.release () def getClassForId (self, objId): """ Given an object ID, return the class key for the referenced object """ for classKey in self.tables: if objId.index() in self.tables[classKey]: return classKey return None def callMethod (self, userOid, methodName, args): self.lock.acquire () methodOk = True try: classKey = self.getClassForId (self.rawObjId (userOid)) if classKey == None: raise ValueError () if methodName not in self.schema[classKey][2]: print "Method '%s' not valid for class '%s'" % (methodName, self.displayClassName(classKey)) raise ValueError () schemaMethod = self.schema[classKey][2][methodName] count = 0 for arg in 
range(len(schemaMethod[1])): if schemaMethod[1][arg][2].find("I") != -1: count += 1 if len (args) != count: print "Wrong number of method args: Need %d, Got %d" % (count, len (args)) raise ValueError () namedArgs = {} idx = 0 for arg in range(len(schemaMethod[1])): if schemaMethod[1][arg][2].find("I") != -1: namedArgs[schemaMethod[1][arg][0]] = args[idx] idx += 1 self.methodSeq = self.methodSeq + 1 self.methodsPending[self.methodSeq] = methodName except Exception, e: methodOk = False self.lock.release () if methodOk: # try: self.mclient.callMethod (self.mch, self.methodSeq, self.rawObjId (userOid), classKey, methodName, namedArgs) # except ValueError, e: # print "Error invoking method:", e def makeIdRow (self, displayId): if displayId in self.idMap: objId = self.idMap[displayId] else: return None if objId.getFlags() == 0: flags = "" else: flags = str(objId.getFlags()) seq = objId.getSequence() if seq == 0: seqText = "" else: seqText = str(seq) return (displayId, flags, seqText, objId.getBroker(), objId.getBank(), hex(objId.getObject())) def listIds (self, select): rows = [] if select == 0: sorted = self.idMap.keys() sorted.sort() for displayId in sorted: row = self.makeIdRow (displayId) rows.append(row) else: row = self.makeIdRow (select) if row == None: print "Display Id %d not known" % select return rows.append(row) self.disp.table("Translation of Display IDs:", ("DisplayID", "Flags", "BootSequence", "Broker", "Bank", "Object"), rows) def do_list (self, data): tokens = data.split () if len (tokens) == 0: self.listClasses () else: self.listObjects (tokens) def do_show (self, data): tokens = data.split () self.showObjects (tokens) def do_schema (self, data): if data == "": self.schemaSummary () else: self.schemaTable (data) def do_call (self, data): encTokens = data.split () try: tokens = [a.decode(locale.getpreferredencoding()) for a in encArgs] except: tokens = encTokens if len (tokens) < 2: print "Not enough arguments supplied" return displayId = long (tokens[0]) methodName = tokens[1] args = tokens[2:] self.callMethod (displayId, methodName, args) def do_id (self, data): if data == "": select = 0 else: select = int(data) self.listIds(select) def do_exit (self): self.mclient.removeChannel (self.mch) qpid-python-0.22/python/qpid/disp.py0000644000175000017500000001400011446375662015571 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
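# Illustrative sketch (editorial addition): the do_* methods above are the
# command handlers of the management CLI (do_list, do_show, do_schema,
# do_call, do_id), each taking the remainder of the command line as a single
# string.  A hypothetical front end, not part of the library, could dispatch
# on the first word like this; do_exit takes no argument and would need
# special-casing.
def run_command(mgmt, line):
  verb, _, rest = line.strip().partition(" ")
  handler = getattr(mgmt, "do_" + verb, None)
  if handler is None:
    print "Unknown command:", verb
  else:
    handler(rest)

# run_command(mgmt, "list queue")        -> mgmt.do_list("queue")
# run_command(mgmt, "call 101 purge 0")  -> mgmt.do_call("101 purge 0")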
# from time import strftime, gmtime class Header: """ """ NONE = 1 KMG = 2 YN = 3 Y = 4 TIME_LONG = 5 TIME_SHORT = 6 DURATION = 7 def __init__(self, text, format=NONE): self.text = text self.format = format def __repr__(self): return self.text def __str__(self): return self.text def formatted(self, value): try: if value == None: return '' if self.format == Header.NONE: return value if self.format == Header.KMG: return self.num(value) if self.format == Header.YN: if value: return 'Y' return 'N' if self.format == Header.Y: if value: return 'Y' return '' if self.format == Header.TIME_LONG: return strftime("%c", gmtime(value / 1000000000)) if self.format == Header.TIME_SHORT: return strftime("%X", gmtime(value / 1000000000)) if self.format == Header.DURATION: if value < 0: value = 0 sec = value / 1000000000 min = sec / 60 hour = min / 60 day = hour / 24 result = "" if day > 0: result = "%dd " % day if hour > 0 or result != "": result += "%dh " % (hour % 24) if min > 0 or result != "": result += "%dm " % (min % 60) result += "%ds" % (sec % 60) return result except: return "?" def numCell(self, value, tag): fp = float(value) / 1000. if fp < 10.0: return "%1.2f%c" % (fp, tag) if fp < 100.0: return "%2.1f%c" % (fp, tag) return "%4d%c" % (value / 1000, tag) def num(self, value): if value < 1000: return "%4d" % value if value < 1000000: return self.numCell(value, 'k') value /= 1000 if value < 1000000: return self.numCell(value, 'm') value /= 1000 return self.numCell(value, 'g') class Display: """ Display formatting for QPID Management CLI """ def __init__(self, spacing=2, prefix=" "): self.tableSpacing = spacing self.tablePrefix = prefix self.timestampFormat = "%X" def formattedTable(self, title, heads, rows): fRows = [] for row in rows: fRow = [] col = 0 for cell in row: fRow.append(heads[col].formatted(cell)) col += 1 fRows.append(fRow) headtext = [] for head in heads: headtext.append(head.text) self.table(title, headtext, fRows) def table(self, title, heads, rows): """ Print a table with autosized columns """ # Pad the rows to the number of heads for row in rows: diff = len(heads) - len(row) for idx in range(diff): row.append("") print title if len (rows) == 0: return colWidth = [] col = 0 line = self.tablePrefix for head in heads: width = len (head) for row in rows: cellWidth = len (unicode (row[col])) if cellWidth > width: width = cellWidth colWidth.append (width + self.tableSpacing) line = line + head if col < len (heads) - 1: for i in range (colWidth[col] - len (head)): line = line + " " col = col + 1 print line line = self.tablePrefix for width in colWidth: for i in range (width): line = line + "=" print line for row in rows: line = self.tablePrefix col = 0 for width in colWidth: line = line + unicode (row[col]) if col < len (heads) - 1: for i in range (width - len (unicode (row[col]))): line = line + " " col = col + 1 print line def do_setTimeFormat (self, fmt): """ Select timestamp format """ if fmt == "long": self.timestampFormat = "%c" elif fmt == "short": self.timestampFormat = "%X" def timestamp (self, nsec): """ Format a nanosecond-since-the-epoch timestamp for printing """ return strftime (self.timestampFormat, gmtime (nsec / 1000000000)) def duration(self, nsec): if nsec < 0: nsec = 0 sec = nsec / 1000000000 min = sec / 60 hour = min / 60 day = hour / 24 result = "" if day > 0: result = "%dd " % day if hour > 0 or result != "": result += "%dh " % (hour % 24) if min > 0 or result != "": result += "%dm " % (min % 60) result += "%ds" % (sec % 60) return result class Sortable: """ """ 
def __init__(self, row, sortIndex): self.row = row self.sortIndex = sortIndex if sortIndex >= len(row): raise Exception("sort index exceeds row boundary") def __cmp__(self, other): return cmp(self.row[self.sortIndex], other.row[self.sortIndex]) def getRow(self): return self.row class Sorter: """ """ def __init__(self, heads, rows, sortCol, limit=0, inc=True): col = 0 for head in heads: if head.text == sortCol: break col += 1 if col == len(heads): raise Exception("sortCol '%s', not found in headers" % sortCol) list = [] for row in rows: list.append(Sortable(row, col)) list.sort() if not inc: list.reverse() count = 0 self.sorted = [] for row in list: self.sorted.append(row.getRow()) count += 1 if count == limit: break def getSorted(self): return self.sorted qpid-python-0.22/python/qpid/debug.py0000644000175000017500000000334411214247377015723 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import threading, traceback, signal, sys, time def stackdump(sig, frm): code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# ThreadID: %s" % threadId) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) print "\n".join(code) signal.signal(signal.SIGQUIT, stackdump) class LoudLock: def __init__(self): self.lock = threading.RLock() def acquire(self, blocking=1): while not self.lock.acquire(blocking=0): time.sleep(1) print >> sys.out, "TRYING" traceback.print_stack(None, None, out) print >> sys.out, "TRYING" print >> sys.out, "ACQUIRED" traceback.print_stack(None, None, out) print >> sys.out, "ACQUIRED" return True def _is_owned(self): return self.lock._is_owned() def release(self): self.lock.release() qpid-python-0.22/python/qpid/connection08.py0000644000175000017500000003477212142746464017157 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ A Connection class containing socket code that uses the spec metadata to read and write Frame objects. 
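# Illustrative sketch (editorial addition): Header, Display and Sorter from
# disp.py above combine into a small reporting pipeline -- Header carries a
# column title plus a formatting hint, Sorter orders rows on a named column,
# and Display.formattedTable() prints the result with auto-sized columns.
# The column names and row data here are made up.
from qpid.disp import Header, Display, Sorter

heads = [Header("queue"), Header("depth", Header.KMG), Header("durable", Header.YN)]
rows = [["alpha", 12500, True], ["beta", 4, False]]
ordered = Sorter(heads, rows, "depth", inc=False).getSorted()
Display().formattedTable("Queues:", heads, ordered)
# prints "alpha" first, with its depth rendered as "12.5k" and durable as "Y"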
This could be used by a client, server, or even a proxy implementation. """ import socket, codec, logging, qpid from cStringIO import StringIO from codec import EOF from compat import SHUT_RDWR from exceptions import VersionError from logging import getLogger, DEBUG log = getLogger("qpid.connection08") class SockIO: def __init__(self, sock): self.sock = sock def write(self, buf): if log.isEnabledFor(DEBUG): log.debug("OUT: %r", buf) self.sock.sendall(buf) def read(self, n): data = "" while len(data) < n: try: s = self.sock.recv(n - len(data)) except socket.error: break if len(s) == 0: break data += s if log.isEnabledFor(DEBUG): log.debug("IN: %r", data) return data def flush(self): pass def close(self): self.sock.shutdown(SHUT_RDWR) self.sock.close() def connect(host, port): sock = socket.socket() sock.connect((host, port)) sock.setblocking(1) return SockIO(sock) def listen(host, port, predicate = lambda: True): sock = socket.socket() sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((host, port)) sock.listen(5) while predicate(): s, a = sock.accept() yield SockIO(s) class FramingError(Exception): pass class Connection: def __init__(self, io, spec): self.codec = codec.Codec(io, spec) self.spec = spec self.FRAME_END = self.spec.constants.byname["frame_end"].id self.write = getattr(self, "write_%s_%s" % (self.spec.major, self.spec.minor)) self.read = getattr(self, "read_%s_%s" % (self.spec.major, self.spec.minor)) def flush(self): self.codec.flush() INIT="!4s4B" def init(self): self.codec.pack(Connection.INIT, "AMQP", 1, 1, self.spec.major, self.spec.minor) def tini(self): self.codec.unpack(Connection.INIT) def write_8_0(self, frame): c = self.codec c.encode_octet(self.spec.constants.byname[frame.type].id) c.encode_short(frame.channel) body = StringIO() enc = codec.Codec(body, self.spec) frame.encode(enc) enc.flush() c.encode_longstr(body.getvalue()) c.encode_octet(self.FRAME_END) def read_8_0(self): c = self.codec tid = c.decode_octet() try: type = self.spec.constants.byid[tid].name except KeyError: if tid == ord('A') and c.unpack("!3s") == "MQP": _, _, major, minor = c.unpack("4B") raise VersionError("client: %s-%s, server: %s-%s" % (self.spec.major, self.spec.minor, major, minor)) else: raise FramingError("unknown frame type: %s" % tid) try: channel = c.decode_short() body = c.decode_longstr() dec = codec.Codec(StringIO(body), self.spec) frame = Frame.DECODERS[type].decode(self.spec, dec, len(body)) frame.channel = channel end = c.decode_octet() if end != self.FRAME_END: garbage = "" while end != self.FRAME_END: garbage += chr(end) end = c.decode_octet() raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage) return frame except EOF: # An EOF caught here can indicate an error decoding the frame, # rather than that a disconnection occurred,so it's worth logging it. 
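# Illustrative sketch (editorial addition): SockIO above is a thin blocking
# wrapper around a socket -- write() is sendall() plus optional debug
# logging, and read(n) loops on recv() until n bytes arrive or the peer
# closes.  Exercised over a local socket pair (socketpair() is Unix-only):
import socket
from qpid.connection08 import SockIO

a, b = socket.socketpair()
out, inp = SockIO(a), SockIO(b)
out.write("AMQP")
assert inp.read(4) == "AMQP"
out.close()   # shuts down and closes the underlying socket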
log.exception("Error occurred when reading frame with tid %s" % tid) raise def write_0_9(self, frame): self.write_8_0(frame) def read_0_9(self): return self.read_8_0() def write_0_91(self, frame): self.write_8_0(frame) def read_0_91(self): return self.read_8_0() def write_0_10(self, frame): c = self.codec flags = 0 if frame.bof: flags |= 0x08 if frame.eof: flags |= 0x04 if frame.bos: flags |= 0x02 if frame.eos: flags |= 0x01 c.encode_octet(flags) # TODO: currently fixed at ver=0, B=E=b=e=1 c.encode_octet(self.spec.constants.byname[frame.type].id) body = StringIO() enc = codec.Codec(body, self.spec) frame.encode(enc) enc.flush() frame_size = len(body.getvalue()) + 12 # TODO: Magic number (frame header size) c.encode_short(frame_size) c.encode_octet(0) # Reserved c.encode_octet(frame.subchannel & 0x0f) c.encode_short(frame.channel) c.encode_long(0) # Reserved c.write(body.getvalue()) c.encode_octet(self.FRAME_END) def read_0_10(self): c = self.codec flags = c.decode_octet() # TODO: currently ignoring flags framing_version = (flags & 0xc0) >> 6 if framing_version != 0: raise "frame error: unknown framing version" type = self.spec.constants.byid[c.decode_octet()].name frame_size = c.decode_short() if frame_size < 12: # TODO: Magic number (frame header size) raise "frame error: frame size too small" reserved1 = c.decode_octet() field = c.decode_octet() subchannel = field & 0x0f channel = c.decode_short() reserved2 = c.decode_long() # TODO: reserved maybe need to ensure 0 if (flags & 0x30) != 0 or reserved1 != 0 or (field & 0xf0) != 0: raise "frame error: reserved bits not all zero" body_size = frame_size - 12 # TODO: Magic number (frame header size) body = c.read(body_size) dec = codec.Codec(StringIO(body), self.spec) try: frame = Frame.DECODERS[type].decode(self.spec, dec, len(body)) except EOF: raise "truncated frame body: %r" % body frame.channel = channel frame.subchannel = subchannel end = c.decode_octet() if end != self.FRAME_END: garbage = "" while end != self.FRAME_END: garbage += chr(end) end = c.decode_octet() raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage) return frame def write_99_0(self, frame): self.write_0_10(frame) def read_99_0(self): return self.read_0_10() class Frame: DECODERS = {} class __metaclass__(type): def __new__(cls, name, bases, dict): for attr in ("encode", "decode", "type"): if not dict.has_key(attr): raise TypeError("%s must define %s" % (name, attr)) dict["decode"] = staticmethod(dict["decode"]) if dict.has_key("__init__"): __init__ = dict["__init__"] def init(self, *args, **kwargs): args = list(args) self.init(args, kwargs) __init__(self, *args, **kwargs) dict["__init__"] = init t = type.__new__(cls, name, bases, dict) if t.type != None: Frame.DECODERS[t.type] = t return t type = None def init(self, args, kwargs): self.channel = kwargs.pop("channel", 0) self.subchannel = kwargs.pop("subchannel", 0) self.bos = True self.eos = True self.bof = True self.eof = True def encode(self, enc): abstract def decode(spec, dec, size): abstract class Method(Frame): type = "frame_method" def __init__(self, method, args): if len(args) != len(method.fields): argspec = ["%s: %s" % (f.name, f.type) for f in method.fields] raise TypeError("%s.%s expecting (%s), got %s" % (method.klass.name, method.name, ", ".join(argspec), args)) self.method = method self.method_type = method self.args = args self.eof = not method.content def encode(self, c): version = (c.spec.major, c.spec.minor) if version == (0, 10) or version == (99, 0): c.encode_octet(self.method.klass.id) 
c.encode_octet(self.method.id) else: c.encode_short(self.method.klass.id) c.encode_short(self.method.id) for field, arg in zip(self.method.fields, self.args): c.encode(field.type, arg) def decode(spec, c, size): version = (c.spec.major, c.spec.minor) if version == (0, 10) or version == (99, 0): klass = spec.classes.byid[c.decode_octet()] meth = klass.methods.byid[c.decode_octet()] else: klass = spec.classes.byid[c.decode_short()] meth = klass.methods.byid[c.decode_short()] args = tuple([c.decode(f.type) for f in meth.fields]) return Method(meth, args) def __str__(self): return "[%s] %s %s" % (self.channel, self.method, ", ".join([str(a) for a in self.args])) class Request(Frame): type = "frame_request" def __init__(self, id, response_mark, method): self.id = id self.response_mark = response_mark self.method = method self.method_type = method.method_type self.args = method.args def encode(self, enc): enc.encode_longlong(self.id) enc.encode_longlong(self.response_mark) # reserved enc.encode_long(0) self.method.encode(enc) def decode(spec, dec, size): id = dec.decode_longlong() mark = dec.decode_longlong() # reserved dec.decode_long() method = Method.decode(spec, dec, size - 20) return Request(id, mark, method) def __str__(self): return "[%s] Request(%s) %s" % (self.channel, self.id, self.method) class Response(Frame): type = "frame_response" def __init__(self, id, request_id, batch_offset, method): self.id = id self.request_id = request_id self.batch_offset = batch_offset self.method = method self.method_type = method.method_type self.args = method.args def encode(self, enc): enc.encode_longlong(self.id) enc.encode_longlong(self.request_id) enc.encode_long(self.batch_offset) self.method.encode(enc) def decode(spec, dec, size): id = dec.decode_longlong() request_id = dec.decode_longlong() batch_offset = dec.decode_long() method = Method.decode(spec, dec, size - 20) return Response(id, request_id, batch_offset, method) def __str__(self): return "[%s] Response(%s,%s,%s) %s" % (self.channel, self.id, self.request_id, self.batch_offset, self.method) def uses_struct_encoding(spec): return (spec.major == 0 and spec.minor == 10) or (spec.major == 99 and spec.minor == 0) class Header(Frame): type = "frame_header" def __init__(self, klass, weight, size, properties): self.klass = klass self.weight = weight self.size = size self.properties = properties self.eof = size == 0 self.bof = False def __getitem__(self, name): return self.properties[name] def __setitem__(self, name, value): self.properties[name] = value def __delitem__(self, name): del self.properties[name] def encode(self, c): if uses_struct_encoding(c.spec): self.encode_structs(c) else: self.encode_legacy(c) def encode_structs(self, c): # XXX structs = [qpid.Struct(c.spec.domains.byname["delivery_properties"].type), qpid.Struct(c.spec.domains.byname["message_properties"].type)] # XXX props = self.properties.copy() for k in self.properties: for s in structs: if s.exists(k): s.set(k, props.pop(k)) if props: raise TypeError("no such property: %s" % (", ".join(props))) # message properties store the content-length now, and weight is # deprecated if self.size != None: structs[1].content_length = self.size for s in structs: c.encode_long_struct(s) def encode_legacy(self, c): c.encode_short(self.klass.id) c.encode_short(self.weight) c.encode_longlong(self.size) # property flags nprops = len(self.klass.fields) flags = 0 for i in range(nprops): f = self.klass.fields.items[i] flags <<= 1 if self.properties.get(f.name) != None: flags |= 1 # the last bit 
indicates more flags if i > 0 and (i % 15) == 0: flags <<= 1 if nprops > (i + 1): flags |= 1 c.encode_short(flags) flags = 0 flags <<= ((16 - (nprops % 15)) % 16) c.encode_short(flags) # properties for f in self.klass.fields: v = self.properties.get(f.name) if v != None: c.encode(f.type, v) def decode(spec, c, size): if uses_struct_encoding(spec): return Header.decode_structs(spec, c, size) else: return Header.decode_legacy(spec, c, size) def decode_structs(spec, c, size): structs = [] start = c.nread while c.nread - start < size: structs.append(c.decode_long_struct()) # XXX props = {} length = None for s in structs: for f in s.type.fields: if s.has(f.name): props[f.name] = s.get(f.name) if f.name == "content_length": length = s.get(f.name) return Header(None, 0, length, props) decode_structs = staticmethod(decode_structs) def decode_legacy(spec, c, size): klass = spec.classes.byid[c.decode_short()] weight = c.decode_short() size = c.decode_longlong() # property flags bits = [] while True: flags = c.decode_short() for i in range(15, 0, -1): if flags >> i & 0x1 != 0: bits.append(True) else: bits.append(False) if flags & 0x1 == 0: break # properties properties = {} for b, f in zip(bits, klass.fields): if b: # Note: decode returns a unicode u'' string but only # plain '' strings can be used as keywords so we need to # stringify the names. properties[str(f.name)] = c.decode(f.type) return Header(klass, weight, size, properties) decode_legacy = staticmethod(decode_legacy) def __str__(self): return "%s %s %s %s" % (self.klass, self.weight, self.size, self.properties) class Body(Frame): type = "frame_body" def __init__(self, content): self.content = content self.eof = True self.bof = False def encode(self, enc): enc.write(self.content) def decode(spec, dec, size): return Body(dec.read(size)) def __str__(self): return "Body(%r)" % self.content # TODO: # OOB_METHOD = "frame_oob_method" # OOB_HEADER = "frame_oob_header" # OOB_BODY = "frame_oob_body" # TRACE = "frame_trace" # HEARTBEAT = "frame_heartbeat" qpid-python-0.22/python/qpid/codec.py0000644000175000017500000003407111003407066015700 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ Utility code to translate between python objects and AMQP encoded data fields. 
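# Illustrative sketch (editorial addition): every frame class above goes
# through the Frame metaclass, which fills in default channel/subchannel
# numbers and segment flags before the subclass __init__ runs; Body then
# clears bof and sets eof for its content segment.  A quick check of those
# defaults:
from qpid.connection08 import Body

b = Body("hello")
assert (b.channel, b.subchannel) == (0, 0)
assert (b.bof, b.eof) == (False, True)
print b   # Body('hello')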
The unit test for this module is located in tests/codec.py """ import re, qpid, spec08 from cStringIO import StringIO from struct import * from reference import ReferenceId class EOF(Exception): pass TYPE_ALIASES = { "long_string": "longstr", "unsigned_int": "long" } class Codec: """ class that handles encoding/decoding of AMQP primitives """ def __init__(self, stream, spec): """ initializing the stream/fields used """ self.stream = stream self.spec = spec self.nwrote = 0 self.nread = 0 self.incoming_bits = [] self.outgoing_bits = [] self.types = {} self.codes = {} self.encodings = { basestring: "longstr", int: "long", long: "long", None.__class__:"void", list: "sequence", tuple: "sequence", dict: "table" } for constant in self.spec.constants: if constant.klass == "field-table-type": type = constant.name.replace("field_table_", "") self.typecode(constant.id, TYPE_ALIASES.get(type, type)) if not self.types: self.typecode(ord('S'), "longstr") self.typecode(ord('I'), "long") def typecode(self, code, type): self.types[code] = type self.codes[type] = code def resolve(self, klass): if self.encodings.has_key(klass): return self.encodings[klass] for base in klass.__bases__: result = self.resolve(base) if result != None: return result def read(self, n): """ reads in 'n' bytes from the stream. Can raise EOF exception """ self.clearbits() data = self.stream.read(n) if n > 0 and len(data) == 0: raise EOF() self.nread += len(data) return data def write(self, s): """ writes data 's' to the stream """ self.flushbits() self.stream.write(s) self.nwrote += len(s) def flush(self): """ flushes the bits and data present in the stream """ self.flushbits() self.stream.flush() def flushbits(self): """ flushes the bits(compressed into octets) onto the stream """ if len(self.outgoing_bits) > 0: bytes = [] index = 0 for b in self.outgoing_bits: if index == 0: bytes.append(0) if b: bytes[-1] |= 1 << index index = (index + 1) % 8 del self.outgoing_bits[:] for byte in bytes: self.encode_octet(byte) def clearbits(self): if self.incoming_bits: self.incoming_bits = [] def pack(self, fmt, *args): """ packs the data 'args' as per the format 'fmt' and writes it to the stream """ self.write(pack(fmt, *args)) def unpack(self, fmt): """ reads data from the stream and unpacks it as per the format 'fmt' """ size = calcsize(fmt) data = self.read(size) values = unpack(fmt, data) if len(values) == 1: return values[0] else: return values def encode(self, type, value): """ calls the appropriate encode function e.g. encode_octet, encode_short etc. """ if isinstance(type, spec08.Struct): self.encode_struct(type, value) else: getattr(self, "encode_" + type)(value) def decode(self, type): """ calls the appropriate decode function e.g. decode_octet, decode_short etc. 
""" if isinstance(type, spec08.Struct): return self.decode_struct(type) else: return getattr(self, "decode_" + type)() def encode_bit(self, o): """ encodes a bit """ if o: self.outgoing_bits.append(True) else: self.outgoing_bits.append(False) def decode_bit(self): """ decodes a bit """ if len(self.incoming_bits) == 0: bits = self.decode_octet() for i in range(8): self.incoming_bits.append(bits >> i & 1 != 0) return self.incoming_bits.pop(0) def encode_octet(self, o): """ encodes octet (8 bits) data 'o' in network byte order """ # octet's valid range is [0,255] if (o < 0 or o > 255): raise ValueError('Valid range of octet is [0,255]') self.pack("!B", int(o)) def decode_octet(self): """ decodes a octet (8 bits) encoded in network byte order """ return self.unpack("!B") def encode_short(self, o): """ encodes short (16 bits) data 'o' in network byte order """ # short int's valid range is [0,65535] if (o < 0 or o > 65535): raise ValueError('Valid range of short int is [0,65535]: %s' % o) self.pack("!H", int(o)) def decode_short(self): """ decodes a short (16 bits) in network byte order """ return self.unpack("!H") def encode_long(self, o): """ encodes long (32 bits) data 'o' in network byte order """ # we need to check both bounds because on 64 bit platforms # struct.pack won't raise an error if o is too large if (o < 0 or o > 4294967295): raise ValueError('Valid range of long int is [0,4294967295]') self.pack("!L", int(o)) def decode_long(self): """ decodes a long (32 bits) in network byte order """ return self.unpack("!L") def encode_signed_long(self, o): self.pack("!q", o) def decode_signed_long(self): return self.unpack("!q") def encode_signed_int(self, o): self.pack("!l", o) def decode_signed_int(self): return self.unpack("!l") def encode_longlong(self, o): """ encodes long long (64 bits) data 'o' in network byte order """ self.pack("!Q", o) def decode_longlong(self): """ decodes a long long (64 bits) in network byte order """ return self.unpack("!Q") def encode_float(self, o): self.pack("!f", o) def decode_float(self): return self.unpack("!f") def encode_double(self, o): self.pack("!d", o) def decode_double(self): return self.unpack("!d") def encode_bin128(self, b): for idx in range (0,16): self.pack("!B", ord (b[idx])) def decode_bin128(self): result = "" for idx in range (0,16): result = result + chr (self.unpack("!B")) return result def encode_raw(self, len, b): for idx in range (0,len): self.pack("!B", b[idx]) def decode_raw(self, len): result = "" for idx in range (0,len): result = result + chr (self.unpack("!B")) return result def enc_str(self, fmt, s): """ encodes a string 's' in network byte order as per format 'fmt' """ size = len(s) self.pack(fmt, size) self.write(s) def dec_str(self, fmt): """ decodes a string in network byte order as per format 'fmt' """ size = self.unpack(fmt) return self.read(size) def encode_shortstr(self, s): """ encodes a short string 's' in network byte order """ # short strings are limited to 255 octets if len(s) > 255: raise ValueError('Short strings are limited to 255 octets') self.enc_str("!B", s) def decode_shortstr(self): """ decodes a short string in network byte order """ return self.dec_str("!B") def encode_longstr(self, s): """ encodes a long string 's' in network byte order """ if isinstance(s, dict): self.encode_table(s) else: self.enc_str("!L", s) def decode_longstr(self): """ decodes a long string 's' in network byte order """ return self.dec_str("!L") def encode_table(self, tbl): """ encodes a table data structure in network byte order """ 
enc = StringIO() codec = Codec(enc, self.spec) if tbl: for key, value in tbl.items(): if self.spec.major == 8 and self.spec.minor == 0 and len(key) > 128: raise ValueError("field table key too long: '%s'" % key) type = self.resolve(value.__class__) if type == None: raise ValueError("no encoding for: " + value.__class__) codec.encode_shortstr(key) codec.encode_octet(self.codes[type]) codec.encode(type, value) s = enc.getvalue() self.encode_long(len(s)) self.write(s) def decode_table(self): """ decodes a table data structure in network byte order """ size = self.decode_long() start = self.nread result = {} while self.nread - start < size: key = self.decode_shortstr() code = self.decode_octet() if self.types.has_key(code): value = self.decode(self.types[code]) else: w = width(code) if fixed(code): value = self.read(w) else: value = self.read(self.dec_num(w)) result[key] = value return result def encode_timestamp(self, t): """ encodes a timestamp data structure in network byte order """ self.encode_longlong(t) def decode_timestamp(self): """ decodes a timestamp data structure in network byte order """ return self.decode_longlong() def encode_content(self, s): """ encodes a content data structure in network byte order content can be passed as a string in which case it is assumed to be inline data, or as an instance of ReferenceId indicating it is a reference id """ if isinstance(s, ReferenceId): self.encode_octet(1) self.encode_longstr(s.id) else: self.encode_octet(0) self.encode_longstr(s) def decode_content(self): """ decodes a content data structure in network byte order return a string for inline data and a ReferenceId instance for references """ type = self.decode_octet() if type == 0: return self.decode_longstr() else: return ReferenceId(self.decode_longstr()) # new domains for 0-10: def encode_rfc1982_long(self, s): self.encode_long(s) def decode_rfc1982_long(self): return self.decode_long() def encode_rfc1982_long_set(self, s): self.encode_short(len(s) * 4) for i in s: self.encode_long(i) def decode_rfc1982_long_set(self): count = self.decode_short() / 4 set = [] for i in range(0, count): set.append(self.decode_long()) return set; def encode_uuid(self, s): self.pack("16s", s) def decode_uuid(self): return self.unpack("16s") def enc_num(self, width, n): if width == 1: self.encode_octet(n) elif width == 2: self.encode_short(n) elif width == 3: self.encode_long(n) else: raise ValueError("invalid width: %s" % width) def dec_num(self, width): if width == 1: return self.decode_octet() elif width == 2: return self.decode_short() elif width == 4: return self.decode_long() else: raise ValueError("invalid width: %s" % width) def encode_struct(self, type, s): if type.size: enc = StringIO() codec = Codec(enc, self.spec) codec.encode_struct_body(type, s) codec.flush() body = enc.getvalue() self.enc_num(type.size, len(body)) self.write(body) else: self.encode_struct_body(type, s) def decode_struct(self, type): if type.size: size = self.dec_num(type.size) if size == 0: return None return self.decode_struct_body(type) def encode_struct_body(self, type, s): reserved = 8*type.pack - len(type.fields) assert reserved >= 0 for f in type.fields: if s == None: self.encode_bit(False) elif f.type == "bit": self.encode_bit(s.get(f.name)) else: self.encode_bit(s.has(f.name)) for i in range(reserved): self.encode_bit(False) for f in type.fields: if f.type != "bit" and s != None and s.has(f.name): self.encode(f.type, s.get(f.name)) self.flush() def decode_struct_body(self, type): reserved = 8*type.pack - 
len(type.fields) assert reserved >= 0 s = qpid.Struct(type) for f in type.fields: if f.type == "bit": s.set(f.name, self.decode_bit()) elif self.decode_bit(): s.set(f.name, None) for i in range(reserved): if self.decode_bit(): raise ValueError("expecting reserved flag") for f in type.fields: if f.type != "bit" and s.has(f.name): s.set(f.name, self.decode(f.type)) self.clearbits() return s def encode_long_struct(self, s): enc = StringIO() codec = Codec(enc, self.spec) type = s.type codec.encode_short(type.type) codec.encode_struct_body(type, s) self.encode_longstr(enc.getvalue()) def decode_long_struct(self): codec = Codec(StringIO(self.decode_longstr()), self.spec) type = self.spec.structs[codec.decode_short()] return codec.decode_struct_body(type) def decode_array(self): size = self.decode_long() code = self.decode_octet() count = self.decode_long() result = [] for i in range(0, count): if self.types.has_key(code): value = self.decode(self.types[code]) else: w = width(code) if fixed(code): value = self.read(w) else: value = self.read(self.dec_num(w)) result.append(value) return result def fixed(code): return (code >> 6) != 2 def width(code): # decimal if code >= 192: decsel = (code >> 4) & 3 if decsel == 0: return 5 elif decsel == 1: return 9 elif decsel == 3: return 0 else: raise ValueError(code) # variable width elif code < 192 and code >= 128: lenlen = (code >> 4) & 3 if lenlen == 3: raise ValueError(code) return 2 ** lenlen # fixed width else: return (code >> 4) & 7 qpid-python-0.22/python/qpid/spec08.py0000644000175000017500000003417311013327424015730 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module loads protocol metadata into python objects. It provides access to spec metadata via a python object model, and can also dynamically creating python methods, classes, and modules based on the spec metadata. All the generated methods have proper signatures and doc strings based on the spec metadata so the python help system can be used to browse the spec documentation. The generated methods all dispatch to the self.invoke(meth, args) callback of the containing class so that the generated code can be reused in a variety of situations. 
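# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original source): the field-table wire
# format produced by Codec.encode_table / decoded by decode_table in the
# Codec class that ends just above (before the spec08.py header), rebuilt
# here with nothing but the struct module.  A table is a 32-bit byte count
# followed by shortstr-key / type-code-octet / encoded-value triples; a
# shortstr is a one-octet length prefix plus the bytes, a longstr a 32-bit
# length prefix, all in network byte order.  The "S" (long string) type code
# is used for illustration; the real codes come from the loaded AMQP spec
# (self.codes in the Codec), so treat it as an assumption.
import struct

def shortstr(s):
    return struct.pack("!B", len(s)) + s

def longstr(s):
    return struct.pack("!L", len(s)) + s

def table(entries):
    body = ""
    for key, value in entries:
        body += shortstr(key) + "S" + longstr(value)
    return struct.pack("!L", len(body)) + body

print repr(table([("qpid.subject", "example")]))
# -> '\x00\x00\x00\x19\x0cqpid.subjectS\x00\x00\x00\x07example'
# ---------------------------------------------------------------------------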
""" import re, new, mllib, qpid from util import fill class SpecContainer: def __init__(self): self.items = [] self.byname = {} self.byid = {} self.indexes = {} def add(self, item): if self.byname.has_key(item.name): raise ValueError("duplicate name: %s" % item) if item.id == None: item.id = len(self) elif self.byid.has_key(item.id): raise ValueError("duplicate id: %s" % item) self.indexes[item] = len(self.items) self.items.append(item) self.byname[item.name] = item self.byid[item.id] = item def index(self, item): try: return self.indexes[item] except KeyError: raise ValueError(item) def __iter__(self): return iter(self.items) def __len__(self): return len(self.items) class Metadata: PRINT = [] def __init__(self): pass def __str__(self): args = map(lambda f: "%s=%s" % (f, getattr(self, f)), self.PRINT) return "%s(%s)" % (self.__class__.__name__, ", ".join(args)) def __repr__(self): return str(self) class Spec(Metadata): PRINT=["major", "minor", "file"] def __init__(self, major, minor, file): Metadata.__init__(self) self.major = major self.minor = minor self.file = file self.constants = SpecContainer() self.domains = SpecContainer() self.classes = SpecContainer() # methods indexed by classname_methname self.methods = {} # structs by type code self.structs = {} def post_load(self): self.module = self.define_module("amqp%s%s" % (self.major, self.minor)) self.klass = self.define_class("Amqp%s%s" % (self.major, self.minor)) def method(self, name): if not self.methods.has_key(name): for cls in self.classes: clen = len(cls.name) if name.startswith(cls.name) and name[clen] == "_": end = name[clen + 1:] if cls.methods.byname.has_key(end): self.methods[name] = cls.methods.byname[end] return self.methods.get(name) def parse_method(self, name): parts = re.split(r"\s*\.\s*", name) if len(parts) != 2: raise ValueError(name) klass, meth = parts return self.classes.byname[klass].methods.byname[meth] def struct(self, name, *args, **kwargs): type = self.domains.byname[name].type return qpid.Struct(type, *args, **kwargs) def define_module(self, name, doc = None): module = new.module(name, doc) module.__file__ = self.file for c in self.classes: cls = c.define_class(c.name) cls.__module__ = module.__name__ setattr(module, c.name, cls) return module def define_class(self, name): methods = {} for c in self.classes: for m in c.methods: meth = m.klass.name + "_" + m.name methods[meth] = m.define_method(meth) return type(name, (), methods) class Constant(Metadata): PRINT=["name", "id"] def __init__(self, spec, name, id, klass, docs): Metadata.__init__(self) self.spec = spec self.name = name self.id = id self.klass = klass self.docs = docs class Domain(Metadata): PRINT=["name", "type"] def __init__(self, spec, name, type, description, docs): Metadata.__init__(self) self.spec = spec self.id = None self.name = name self.type = type self.description = description self.docs = docs class Struct(Metadata): PRINT=["size", "type", "pack"] def __init__(self, size, type, pack): Metadata.__init__(self) self.size = size self.type = type self.pack = pack self.fields = SpecContainer() class Class(Metadata): PRINT=["name", "id"] def __init__(self, spec, name, id, handler, docs): Metadata.__init__(self) self.spec = spec self.name = name self.id = id self.handler = handler self.fields = SpecContainer() self.methods = SpecContainer() self.docs = docs def define_class(self, name): methods = {} for m in self.methods: methods[m.name] = m.define_method(m.name) return type(name, (), methods) class Method(Metadata): PRINT=["name", "id"] 
def __init__(self, klass, name, id, content, responses, result, synchronous, description, docs): Metadata.__init__(self) self.klass = klass self.name = name self.id = id self.content = content self.responses = responses self.result = result self.synchronous = synchronous self.fields = SpecContainer() self.description = description self.docs = docs self.response = False def is_l4_command(self): return self.klass.name not in ["execution", "channel", "connection", "session"] def arguments(self, *args, **kwargs): nargs = len(args) + len(kwargs) maxargs = len(self.fields) if nargs > maxargs: self._type_error("takes at most %s arguments (%s) given", maxargs, nargs) result = [] for f in self.fields: idx = self.fields.index(f) if idx < len(args): result.append(args[idx]) elif kwargs.has_key(f.name): result.append(kwargs.pop(f.name)) else: result.append(Method.DEFAULTS[f.type]) for key, value in kwargs.items(): if self.fields.byname.has_key(key): self._type_error("got multiple values for keyword argument '%s'", key) else: self._type_error("got an unexpected keyword argument '%s'", key) return tuple(result) def _type_error(self, msg, *args): raise TypeError("%s %s" % (self.name, msg % args)) def docstring(self): s = "\n\n".join([fill(d, 2) for d in [self.description] + self.docs]) for f in self.fields: if f.docs: s += "\n\n" + "\n\n".join([fill(f.docs[0], 4, f.name)] + [fill(d, 4) for d in f.docs[1:]]) if self.responses: s += "\n\nValid responses: " for r in self.responses: s += r.name + " " return s METHOD = "__method__" DEFAULTS = {"bit": False, "shortstr": "", "longstr": "", "table": {}, "array": [], "octet": 0, "short": 0, "long": 0, "longlong": 0, "timestamp": 0, "content": None, "uuid": "", "rfc1982_long": 0, "rfc1982_long_set": [], "long_struct": None} def define_method(self, name): g = {Method.METHOD: self} l = {} args = [(f.name, Method.DEFAULTS[f.type]) for f in self.fields] methargs = args[:] if self.content: args += [("content", None)] code = "def %s(self, %s):\n" % \ (name, ", ".join(["%s = %r" % a for a in args])) code += " %r\n" % self.docstring() argnames = ", ".join([a[0] for a in methargs]) code += " return self.invoke(%s" % Method.METHOD if argnames: code += ", (%s,)" % argnames else: code += ", ()" if self.content: code += ", content" code += ")" exec code in g, l return l[name] class Field(Metadata): PRINT=["name", "id", "type"] def __init__(self, name, id, type, domain, description, docs): Metadata.__init__(self) self.name = name self.id = id self.type = type self.domain = domain self.description = description self.docs = docs def default(self): if isinstance(self.type, Struct): return None else: return Method.DEFAULTS[self.type] WIDTHS = { "octet": 1, "short": 2, "long": 4 } def width(st, default=None): if st in (None, "none", ""): return default else: return WIDTHS[st] def get_result(nd, spec): result = nd["result"] if not result: return None name = result["@domain"] if name != None: return spec.domains.byname[name] st_nd = result["struct"] st = Struct(width(st_nd["@size"]), int(result.parent.parent["@index"])*256 + int(st_nd["@type"]), width(st_nd["@pack"], 2)) spec.structs[st.type] = st load_fields(st_nd, st.fields, spec.domains.byname) return st def get_desc(nd): label = nd["@label"] if not label: label = nd.text() if label: label = label.strip() return label def get_docs(nd): return [n.text() for n in nd.query["doc"]] def load_fields(nd, l, domains): for f_nd in nd.query["field"]: type = f_nd["@domain"] if type == None: type = f_nd["@type"] type = pythonize(type) domain = 
None while domains.has_key(type) and domains[type].type != type: domain = domains[type] type = domain.type l.add(Field(pythonize(f_nd["@name"]), f_nd.index(), type, domain, get_desc(f_nd), get_docs(f_nd))) def load(specfile, *errata): doc = mllib.xml_parse(specfile) spec_root = doc["amqp"] spec = Spec(int(spec_root["@major"]), int(spec_root["@minor"]), specfile) for root in [spec_root] + map(lambda x: mllib.xml_parse(x)["amqp"], errata): # constants for nd in root.query["constant"]: val = nd["@value"] if val.startswith("0x"): val = int(val, 16) else: val = int(val) const = Constant(spec, pythonize(nd["@name"]), val, nd["@class"], get_docs(nd)) try: spec.constants.add(const) except ValueError, e: pass #print "Warning:", e # domains are typedefs structs = [] for nd in root.query["domain"]: type = nd["@type"] if type == None: st_nd = nd["struct"] code = st_nd["@type"] if code not in (None, "", "none"): code = int(code) type = Struct(width(st_nd["@size"]), code, width(st_nd["@pack"], 2)) if type.type != None: spec.structs[type.type] = type structs.append((type, st_nd)) else: type = pythonize(type) domain = Domain(spec, pythonize(nd["@name"]), type, get_desc(nd), get_docs(nd)) spec.domains.add(domain) # structs for st, st_nd in structs: load_fields(st_nd, st.fields, spec.domains.byname) # classes for c_nd in root.query["class"]: cname = pythonize(c_nd["@name"]) if spec.classes.byname.has_key(cname): klass = spec.classes.byname[cname] else: klass = Class(spec, cname, int(c_nd["@index"]), c_nd["@handler"], get_docs(c_nd)) spec.classes.add(klass) added_methods = [] load_fields(c_nd, klass.fields, spec.domains.byname) for m_nd in c_nd.query["method"]: mname = pythonize(m_nd["@name"]) if klass.methods.byname.has_key(mname): meth = klass.methods.byname[mname] else: meth = Method(klass, mname, int(m_nd["@index"]), m_nd["@content"] == "1", [pythonize(nd["@name"]) for nd in m_nd.query["response"]], get_result(m_nd, spec), m_nd["@synchronous"] == "1", get_desc(m_nd), get_docs(m_nd)) klass.methods.add(meth) added_methods.append(meth) load_fields(m_nd, meth.fields, spec.domains.byname) # resolve the responses for m in added_methods: m.responses = [klass.methods.byname[r] for r in m.responses] for resp in m.responses: resp.response = True spec.post_load() return spec REPLACE = {" ": "_", "-": "_"} KEYWORDS = {"global": "global_", "return": "return_"} def pythonize(name): name = str(name) for key, val in REPLACE.items(): name = name.replace(key, val) try: name = KEYWORDS[name] except KeyError: pass return name class Rule(Metadata): PRINT = ["text", "implement", "tests"] def __init__(self, text, implement, tests, path): self.text = text self.implement = implement self.tests = tests self.path = path def find_rules(node, rules): if node.name == "rule": rules.append(Rule(node.text, node.get("@implement"), [ch.text for ch in node if ch.name == "test"], node.path())) if node.name == "doc" and node.get("@name") == "rule": tests = [] if node.has("@test"): tests.append(node["@test"]) rules.append(Rule(node.text, None, tests, node.path())) for child in node: find_rules(child, rules) def load_rules(specfile): rules = [] find_rules(xmlutil.parse(specfile), rules) return rules def test_summary(): template = """ AMQP Tests %s
""" rows = [] for rule in load_rules("amqp.org/specs/amqp7.xml"): if rule.tests: tests = ", ".join(rule.tests) else: tests = " " rows.append('Path: %s' 'Implement: %s' 'Tests: %s' % (rule.path[len("/root/amqp"):], rule.implement, tests)) rows.append('%s' % rule.text) rows.append(' ') print template % "\n".join(rows) qpid-python-0.22/python/RELEASE_NOTES0000644000175000017500000000075011472553406015276 0ustar mbambaApache Qpid Python 0.8 Release Notes ------------------------------ The Qpid 0.8 release of the python client contains support the for both 0-8 and 0-10 of the AMQP specification as well as support for the non-WIP portion of the 0-9 specification. You can access these specifications from: http://www.amqp.org/confluence/display/AMQP/AMQP+Specification The README file provided contains some details on installing and using the python client that is included with this distribution. qpid-python-0.22/python/LICENSE.txt0000644000175000017500000002613711211266575015154 0ustar mbamba Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. qpid-python-0.22/python/MANIFEST.in0000644000175000017500000000011111336167172015050 0ustar mbambarecursive-include examples * recursive-exclude examples verify verify.in qpid-python-0.22/python/qpid-python-test-ant.xml0000644000175000017500000001523211716760326020061 0ustar mbamba Starting Qpid with ${qpid.executable} ${qpid.executable.args} Stopping Qpid ${pid} Killing Qpid ${pid} Running test-suite qpid-python-0.22/python/setup.py0000755000175000017500000002332012142746464015041 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
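# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original source): the distutils pattern
# this package's setup.py (below) relies on -- subclass Command, implement
# the initialize_options/finalize_options/run hooks, then register the class
# through cmdclass so "python setup.py <command>" picks it up.  The
# show_version command here is purely hypothetical; the real commands in
# setup.py are build, build_py, build_doc, clean and install_lib.
from distutils.core import Command

class show_version(Command):
    description = "print the package version"
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        print "qpid-python 0.22"

# wired up the same way as the real commands:
#   setup(..., cmdclass={"show_version": show_version})
# ---------------------------------------------------------------------------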
# import os, re, sys, string from distutils.core import setup, Command from distutils.command.build import build as _build from distutils.command.build_py import build_py as _build_py from distutils.command.clean import clean as _clean from distutils.command.install_lib import install_lib as _install_lib from distutils.dep_util import newer from distutils.dir_util import remove_tree from distutils.dist import Distribution from distutils.errors import DistutilsFileError, DistutilsOptionError from distutils import log from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE MAJOR, MINOR = sys.version_info[0:2] class preprocessor: def copy_file(self, src, dst, preserve_mode=1, preserve_times=1, link=None, level=1): name, actor = self.actor(src, dst) if actor: if not os.path.isfile(src): raise DistutilsFileError, \ "can't copy '%s': doesn't exist or not a regular file" % src if os.path.isdir(dst): dir = dst dst = os.path.join(dst, os.path.basename(src)) else: dir = os.path.dirname(dst) if not self.force and not newer(src, dst): return dst, 0 if os.path.basename(dst) == os.path.basename(src): log.info("%s %s -> %s", name, src, dir) else: log.info("%s %s -> %s", name, src, dst) if self.dry_run: return (dst, 1) else: try: fsrc = open(src, 'rb') except os.error, (errno, errstr): raise DistutilsFileError, \ "could not open '%s': %s" % (src, errstr) if os.path.exists(dst): try: os.unlink(dst) except os.error, (errno, errstr): raise DistutilsFileError, \ "could not delete '%s': %s" % (dst, errstr) try: fdst = open(dst, 'wb') except os.error, (errno, errstr): raise DistutilsFileError, \ "could not create '%s': %s" % (dst, errstr) try: fdst.write(actor(fsrc.read())) finally: fsrc.close() fdst.close() if preserve_mode or preserve_times: st = os.stat(src) if preserve_times: os.utime(dst, (st[ST_ATIME], st[ST_MTIME])) if preserve_mode: os.chmod(dst, S_IMODE(st[ST_MODE])) return (dst, 1) else: return Command.copy_file(self, src, dst, preserve_mode, preserve_times, link, level) doc_option = [('build-doc', None, 'build directory for documentation')] class build(_build): user_options = _build.user_options + doc_option def initialize_options(self): _build.initialize_options(self) self.build_doc = None def finalize_options(self): _build.finalize_options(self) if self.build_doc is None: self.build_doc = "%s/doc" % self.build_base def get_sub_commands(self): return _build.get_sub_commands(self) + ["build_doc"] class build_doc(Command): user_options = doc_option def initialize_options(self): self.build_doc = None def finalize_options(self): self.set_undefined_options('build', ('build_doc', 'build_doc')) def run(self): try: from epydoc.docbuilder import build_doc_index from epydoc.docwriter.html import HTMLWriter except ImportError, e: log.warn('%s -- skipping build_doc', e) return names = ["qpid.messaging"] doc_index = build_doc_index(names, True, True) html_writer = HTMLWriter(doc_index) self.mkpath(self.build_doc) log.info('epydoc %s to %s' % (", ".join(names), self.build_doc)) html_writer.write(self.build_doc) class clean(_clean): user_options = _clean.user_options + doc_option def initialize_options(self): _clean.initialize_options(self) self.build_doc = None def finalize_options(self): _clean.finalize_options(self) self.set_undefined_options('build', ('build_doc', 'build_doc')) def run(self): if self.all: if os.path.exists(self.build_doc): remove_tree(self.build_doc, dry_run=self.dry_run) else: log.debug("%s doesn't exist -- can't clean it", self.build_doc) _clean.run(self) if MAJOR <= 2 and MINOR <= 3: from 
glob import glob from distutils.util import convert_path class distclass(Distribution): def __init__(self, *args, **kwargs): self.package_data = None Distribution.__init__(self, *args, **kwargs) else: distclass = Distribution ann = re.compile(r"([ \t]*)@([_a-zA-Z][_a-zA-Z0-9]*)([ \t\n\r]+def[ \t]+)([_a-zA-Z][_a-zA-Z0-9]*)") line = re.compile(r"\n([ \t]*)[^ \t\n#]+") class build_py(preprocessor, _build_py): if MAJOR <= 2 and MINOR <= 3: def initialize_options(self): _build_py.initialize_options(self) self.package_data = None def finalize_options(self): _build_py.finalize_options(self) self.package_data = self.distribution.package_data self.data_files = self.get_data_files() def get_data_files (self): data = [] if not self.packages: return data for package in self.packages: # Locate package source directory src_dir = self.get_package_dir(package) # Compute package build directory build_dir = os.path.join(*([self.build_lib] + package.split('.'))) # Length of path to strip from found files plen = 0 if src_dir: plen = len(src_dir)+1 # Strip directory from globbed filenames filenames = [file[plen:] for file in self.find_data_files(package, src_dir)] data.append((package, src_dir, build_dir, filenames)) return data def find_data_files (self, package, src_dir): globs = (self.package_data.get('', []) + self.package_data.get(package, [])) files = [] for pattern in globs: # Each pattern has to be converted to a platform-specific path filelist = glob(os.path.join(src_dir, convert_path(pattern))) # Files that match more than one pattern are only added once files.extend([fn for fn in filelist if fn not in files]) return files def build_package_data (self): lastdir = None for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) self.mkpath(os.path.dirname(target)) self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False) def build_packages(self): _build_py.build_packages(self) self.build_package_data() # end if MAJOR <= 2 and MINOR <= 3 def backport(self, input): output = "" pos = 0 while True: m = ann.search(input, pos) if m: indent, decorator, idef, function = m.groups() output += input[pos:m.start()] output += "%s#@%s%s%s" % (indent, decorator, idef, function) pos = m.end() subst = "\n%s%s = %s(%s)\n" % (indent, function, decorator, function) npos = pos while True: n = line.search(input, npos) if not n: input += subst break if len(n.group(1)) <= len(indent): idx = n.start() input = input[:idx] + subst + input[idx:] break npos = n.end() else: break output += input[pos:] return output def actor(self, src, dst): base, ext = os.path.splitext(src) if ext == ".py" and MAJOR <= 2 and MINOR <= 3: return "backporting", self.backport else: return None, None def pclfile(xmlfile): return "%s.pcl" % os.path.splitext(xmlfile)[0] class install_lib(_install_lib): def get_outputs(self): outputs = _install_lib.get_outputs(self) extra = [] for of in outputs: if os.path.basename(of) == "amqp-0-10-qpid-errata-stripped.xml": extra.append(pclfile(of)) return outputs + extra def install(self): outfiles = _install_lib.install(self) extra = [] for of in outfiles: if os.path.basename(of) == "amqp-0-10-qpid-errata-stripped.xml": tgt = pclfile(of) if self.force or newer(of, tgt): log.info("caching %s to %s" % (of, os.path.basename(tgt))) if not self.dry_run: from qpid.ops import load_types load_types(of) extra.append(tgt) return outfiles + extra setup(name="qpid-python", version="0.22", author="Apache Qpid", 
author_email="dev@qpid.apache.org", packages=["mllib", "qpid", "qpid.messaging", "qpid.tests", "qpid.tests.messaging"], package_data={"qpid": ["specs/*.dtd", "specs/*.xml"]}, scripts=["qpid-python-test"], url="http://qpid.apache.org/", license="Apache Software License", description="Python client implementation for Apache Qpid", cmdclass={"build": build, "build_py": build_py, "build_doc": build_doc, "clean": clean, "install_lib": install_lib}, distclass=distclass) qpid-python-0.22/python/mllib/0000755000175000017500000000000012151237730014412 5ustar mbambaqpid-python-0.22/python/mllib/dom.py0000644000175000017500000001454111240305576015552 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ Simple DOM for both SGML and XML documents. """ from __future__ import division from __future__ import generators from __future__ import nested_scopes import transforms class Container: def __init__(self): self.children = [] def add(self, child): child.parent = self self.children.append(child) def extend(self, children): for child in children: child.parent = self self.children.append(child) class Component: def __init__(self): self.parent = None def index(self): if self.parent: return self.parent.children.index(self) else: return 0 def _line(self, file, line, column): self.file = file self.line = line self.column = column class DispatchError(Exception): def __init__(self, scope, f): msg = "no such attribtue" class Dispatcher: def is_type(self, type): cls = self while cls != None: if cls.type == type: return True cls = cls.base return False def dispatch(self, f, attrs = ""): cls = self while cls != None: if hasattr(f, cls.type): return getattr(f, cls.type)(self) else: cls = cls.base cls = self while cls != None: if attrs: sep = ", " if cls.base == None: sep += "or " else: sep = "" attrs += "%s'%s'" % (sep, cls.type) cls = cls.base raise AttributeError("'%s' object has no attribute %s" % (f.__class__.__name__, attrs)) class Node(Container, Component, Dispatcher): type = "node" base = None def __init__(self): Container.__init__(self) Component.__init__(self) self.query = Query([self]) def __getitem__(self, name): for nd in self.query[name]: return nd def text(self): return self.dispatch(transforms.Text()) def tag(self, name, *attrs, **kwargs): t = Tag(name, *attrs, **kwargs) self.add(t) return t def data(self, s): d = Data(s) self.add(d) return d def entity(self, s): e = Entity(s) self.add(e) return e class Tree(Node): type = "tree" base = Node class Tag(Node): type = "tag" base = Node def __init__(self, _name, *attrs, **kwargs): Node.__init__(self) self.name = _name self.attrs = list(attrs) self.attrs.extend(kwargs.items()) self.singleton = False def get_attr(self, name): for k, v in self.attrs: if name == k: return v def _idx(self, attr): idx = 0 for k, v in self.attrs: 
if k == attr: return idx idx += 1 return None def set_attr(self, name, value): idx = self._idx(name) if idx is None: self.attrs.append((name, value)) else: self.attrs[idx] = (name, value) def dispatch(self, f): try: attr = "do_" + self.name method = getattr(f, attr) except AttributeError: return Dispatcher.dispatch(self, f, "'%s'" % attr) return method(self) class Leaf(Component, Dispatcher): type = "leaf" base = None def __init__(self, data): assert isinstance(data, basestring) self.data = data class Data(Leaf): type = "data" base = Leaf class Entity(Leaf): type = "entity" base = Leaf class Character(Leaf): type = "character" base = Leaf class Comment(Leaf): type = "comment" base = Leaf ################### ## Query Classes ## ########################################################################### class Adder: def __add__(self, other): return Sum(self, other) class Sum(Adder): def __init__(self, left, right): self.left = left self.right = right def __iter__(self): for x in self.left: yield x for x in self.right: yield x class View(Adder): def __init__(self, source): self.source = source class Filter(View): def __init__(self, predicate, source): View.__init__(self, source) self.predicate = predicate def __iter__(self): for nd in self.source: if self.predicate(nd): yield nd class Flatten(View): def __iter__(self): sources = [iter(self.source)] while sources: try: nd = sources[-1].next() if isinstance(nd, Tree): sources.append(iter(nd.children)) else: yield nd except StopIteration: sources.pop() class Children(View): def __iter__(self): for nd in self.source: for child in nd.children: yield child class Attributes(View): def __iter__(self): for nd in self.source: for a in nd.attrs: yield a class Values(View): def __iter__(self): for name, value in self.source: yield value def flatten_path(path): if isinstance(path, basestring): for part in path.split("/"): yield part elif callable(path): yield path else: for p in path: for fp in flatten_path(p): yield fp class Query(View): def __iter__(self): for nd in self.source: yield nd def __getitem__(self, path): query = self.source for p in flatten_path(path): if callable(p): select = Query pred = p source = query elif isinstance(p, basestring): if p[0] == "@": select = Values pred = lambda x, n=p[1:]: x[0] == n source = Attributes(query) elif p[0] == "#": select = Query pred = lambda x, t=p[1:]: x.is_type(t) source = Children(query) else: select = Query pred = lambda x, n=p: isinstance(x, Tag) and x.name == n source = Flatten(Children(query)) else: raise ValueError(p) query = select(Filter(pred, source)) return query qpid-python-0.22/python/mllib/transforms.py0000644000175000017500000000701010663102421017152 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ Useful transforms for dom objects. 
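# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original source): exercising the small
# path language implemented by Query.__getitem__ in dom.py above -- "name"
# selects child tags by name, "@attr" yields attribute values, "#type"
# filters by node type, and segments are separated by "/".  Assumes Python 2
# with this mllib package directory on sys.path (the plain "import dom"
# mirrors how the package's own modules import each other).
import dom

tree = dom.Tree()
amqp = tree.tag("amqp", ("major", "0"), ("minor", "10"))
queue = amqp.tag("class", ("name", "queue"))
queue.tag("method", ("name", "declare"))

print list(tree.query["amqp/class/@name"])       # -> ['queue']
print amqp["class"]["method"].get_attr("name")   # -> declare
# ---------------------------------------------------------------------------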
""" import dom from cStringIO import StringIO class Visitor: def descend(self, node): for child in node.children: child.dispatch(self) def node(self, node): self.descend(node) def leaf(self, leaf): pass class Identity: def descend(self, node): result = [] for child in node.children: result.append(child.dispatch(self)) return result def default(self, tag): result = dom.Tag(tag.name, *tag.attrs) result.extend(self.descend(tag)) return result def tree(self, tree): result = dom.Tree() result.extend(self.descend(tree)) return result def tag(self, tag): return self.default(tag) def leaf(self, leaf): return leaf.__class__(leaf.data) class Sexp(Identity): def __init__(self): self.stack = [] self.level = 0 self.out = "" def open(self, s): self.out += "(%s" % s self.level += len(s) + 1 self.stack.append(s) def line(self, s = ""): self.out = self.out.rstrip() self.out += "\n" + " "*self.level + s def close(self): s = self.stack.pop() self.level -= len(s) + 1 self.out = self.out.rstrip() self.out += ")" def tree(self, tree): self.open("+ ") for child in tree.children: self.line(); child.dispatch(self) self.close() def tag(self, tag): self.open("Node(%s) " % tag.name) for child in tag.children: self.line(); child.dispatch(self) self.close() def leaf(self, leaf): self.line("%s(%s)" % (leaf.__class__.__name__, leaf.data)) class Output: def descend(self, node): out = StringIO() for child in node.children: out.write(child.dispatch(self)) return out.getvalue() def default(self, tag): out = StringIO() out.write("<%s" % tag.name) for k, v in tag.attrs: out.write(' %s="%s"' % (k, v)) out.write(">") out.write(self.descend(tag)) if not tag.singleton: out.write("" % tag.name) return out.getvalue() def tree(self, tree): return self.descend(tree) def tag(self, tag): return self.default(tag) def data(self, leaf): return leaf.data def entity(self, leaf): return "&%s;" % leaf.data def character(self, leaf): raise Exception("TODO") def comment(self, leaf): return "" % leaf.data class Empty(Output): def tag(self, tag): return self.descend(tag) def data(self, leaf): return "" def entity(self, leaf): return "" def character(self, leaf): return "" def comment(self, leaf): return "" class Text(Empty): def data(self, leaf): return leaf.data def entity(self, leaf): return "&%s;" % leaf.data def character(self, leaf): # XXX: is this right? return "&#%s;" % leaf.data qpid-python-0.22/python/mllib/parsers.py0000644000175000017500000000662010651136325016450 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ Parsers for SGML and XML to dom. 
""" import sgmllib, xml.sax.handler from dom import * class Parser: def __init__(self): self.tree = Tree() self.node = self.tree self.nodes = [] def line(self, id, lineno, colno): while self.nodes: n = self.nodes.pop() n._line(id, lineno, colno) def add(self, node): self.node.add(node) self.nodes.append(node) def start(self, name, attrs): tag = Tag(name, *attrs) self.add(tag) self.node = tag def end(self, name): self.balance(name) self.node = self.node.parent def data(self, data): children = self.node.children if children and isinstance(children[-1], Data): children[-1].data += data else: self.add(Data(data)) def comment(self, comment): self.add(Comment(comment)) def entity(self, ref): self.add(Entity(ref)) def character(self, ref): self.add(Character(ref)) def balance(self, name = None): while self.node != self.tree and name != self.node.name: self.node.parent.extend(self.node.children) del self.node.children[:] self.node.singleton = True self.node = self.node.parent class SGMLParser(sgmllib.SGMLParser): def __init__(self, entitydefs = None): sgmllib.SGMLParser.__init__(self) if entitydefs == None: self.entitydefs = {} else: self.entitydefs = entitydefs self.parser = Parser() def unknown_starttag(self, name, attrs): self.parser.start(name, attrs) def handle_data(self, data): self.parser.data(data) def handle_comment(self, comment): self.parser.comment(comment) def unknown_entityref(self, ref): self.parser.entity(ref) def unknown_charref(self, ref): self.parser.character(ref) def unknown_endtag(self, name): self.parser.end(name) def close(self): sgmllib.SGMLParser.close(self) self.parser.balance() assert self.parser.node == self.parser.tree class XMLParser(xml.sax.handler.ContentHandler): def __init__(self): self.parser = Parser() self.locator = None def line(self): if self.locator != None: self.parser.line(self.locator.getSystemId(), self.locator.getLineNumber(), self.locator.getColumnNumber()) def setDocumentLocator(self, locator): self.locator = locator def startElement(self, name, attrs): self.parser.start(name, attrs.items()) self.line() def endElement(self, name): self.parser.end(name) self.line() def characters(self, content): self.parser.data(content) self.line() def skippedEntity(self, name): self.parser.entity(name) self.line() qpid-python-0.22/python/mllib/__init__.py0000644000175000017500000000451511231410267016524 0ustar mbamba# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module provides document parsing and transformation utilities for both SGML and XML. 
""" import os, dom, transforms, parsers, sys import xml.sax, types from xml.sax.handler import ErrorHandler from xml.sax.xmlreader import InputSource from cStringIO import StringIO def transform(node, *args): result = node for t in args: if isinstance(t, types.ClassType): t = t() result = result.dispatch(t) return result def sgml_parse(source): if isinstance(source, basestring): source = StringIO(source) fname = "" elif hasattr(source, "name"): fname = source.name p = parsers.SGMLParser() num = 1 for line in source: p.feed(line) p.parser.line(fname, num, None) num += 1 p.close() return p.parser.tree class Resolver: def __init__(self, path): self.path = path def resolveEntity(self, publicId, systemId): for p in self.path: fname = os.path.join(p, systemId) if os.path.exists(fname): source = InputSource(systemId) source.setByteStream(open(fname)) return source return InputSource(systemId) def xml_parse(filename, path=()): if sys.version_info[0:2] == (2,3): # XXX: this is for older versions of python source = "file://%s" % os.path.abspath(filename) else: source = filename h = parsers.XMLParser() p = xml.sax.make_parser() p.setContentHandler(h) p.setErrorHandler(ErrorHandler()) p.setEntityResolver(Resolver(path)) p.parse(source) return h.parser.tree def sexp(node): s = transforms.Sexp() node.dispatch(s) return s.out qpid-python-0.22/python/examples/0000755000175000017500000000000012151237730015131 5ustar mbambaqpid-python-0.22/python/examples/README.txt0000644000175000017500000000326012117625720016632 0ustar mbambaThe Python Examples =================== README.txt -- This file. api -- Directory containing drain, spout, sever, hello, and hello_xml examples. api/drain -- A simple messaging client that prints messages from the source specified on the command line. api/spout -- A simple messaging client that sends messages to the target specified on the command line. api/server -- An example server that process incoming messages and sends replies. api/hello -- An example client that sends a message and then receives it. api/hello_xml -- An example client that sends a message to the xml exchange and then receives it. reservations -- Directory containing an example machine reservation system. reservations/common.py -- Utility code used by reserve, machine-agent, and inventory scripts. reservations/reserve -- Messaging client for listing, reserving, and releasing machines. reservations/machine-agent -- Messaging server that tracks and reports on the status of its host machine and listens for reservation requests. reservations/inventory -- Messaging server that tracks the last known status of machines. qpid-python-0.22/python/examples/reservations/0000755000175000017500000000000012151237730017655 5ustar mbambaqpid-python-0.22/python/examples/reservations/machine-agent0000755000175000017500000000611711357642123022313 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import optparse, socket from qpid.messaging import * from qpid.log import enable, DEBUG, WARN from common import * host = socket.gethostname() parser = optparse.OptionParser(usage="usage: %prog [options]", description="machine reservation agent") parser.add_option("-b", "--broker", default="localhost", help="connect to specified BROKER (default %default)") parser.add_option("-d", "--database", help="database file for persistent machine status") parser.add_option("-a", "--address", default="reservations", help="address for reservation requests") parser.add_option("-i", "--identity", default=host, help="resource id (default %default)") parser.add_option("-v", dest="verbose", action="store_true", help="enable verbose logging") opts, args = parser.parse_args() if opts.verbose: enable("qpid", DEBUG) else: enable("qpid", WARN) conn = Connection.establish(opts.broker, reconnect=True, reconnect_interval=1) class Agent(Dispatcher): def __init__(self, identity): self.identity = identity self.status = FREE self.owner = None def running(self): return True def get_status(self): msg = Message(properties = {"type": "status"}, content = {"identity": self.identity, "status": self.status, "owner": self.owner}) return msg def do_discover(self, msg): r = self.get_status() return [(msg.reply_to, r)] def do_reserve(self, msg): if self.status == FREE: self.owner = msg.content["owner"] self.status = BUSY return self.do_discover(msg) def do_release(self, msg): if self.owner == msg.content["owner"]: self.status = FREE self.owner = None return self.do_discover(msg) def ignored(self, msg): patterns = msg.properties.get("identity") type = msg.properties.get("type") if patterns and match(self.identity, patterns): return type == "status" else: return True try: ssn = conn.session() rcv = ssn.receiver(opts.address) rcv.capacity = 10 snd = ssn.sender(opts.address) agent = Agent(opts.identity) snd.send(agent.get_status()) agent.run(ssn) except KeyboardInterrupt: pass finally: conn.close() qpid-python-0.22/python/examples/reservations/reserve0000755000175000017500000001360111357642123021262 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
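# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original source): the dispatch-by-type
# pattern the machine-agent above and the other reservation examples use.
# Messages carry a "type" property, and Dispatcher (defined in
# examples/reservations/common.py, shown further below) routes each one to a
# matching do_<type> method and collects (reply_to, reply) pairs.  Assumes
# it is run from the reservations directory so "common" imports, and that
# qpid.messaging is importable; no broker is needed just to dispatch.
from qpid.messaging import Message
from common import Dispatcher

class EchoAgent(Dispatcher):
    def do_ping(self, msg):
        reply = Message(properties={"type": "pong"}, content=msg.content)
        return [(msg.reply_to, reply)]

request = Message(reply_to="demo-queue", properties={"type": "ping"},
                  content={"n": 1})
for address, reply in EchoAgent().dispatch(request):
    print address, reply.properties["type"], reply.content
# -> demo-queue pong {'n': 1}
# ---------------------------------------------------------------------------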
# import optparse, os, sys, time from uuid import uuid4 from qpid.messaging import * from qpid.log import enable, DEBUG, WARN from common import * parser = optparse.OptionParser(usage="usage: %prog [options] PATTERN ...", description="reserve a machine") parser.add_option("-b", "--broker", default="localhost", help="connect to specified BROKER (default %default)") parser.add_option("-a", "--address", default="reservations", help="address for reservation requests") parser.add_option("-r", "--release", action="store_true", help="release any machines matching the pattern") parser.add_option("-s", "--status", action="store_true", help="list machine status") parser.add_option("-d", "--discover", action="store_true", help="use discovery instead of inventory") parser.add_option("-o", "--owner", default=os.environ["USER"], help="the holder of the reservation") parser.add_option("-n", "--number", type=int, default=1, help="the number of machines to reserve") parser.add_option("-t", "--timeout", type=float, default=10, help="timeout in seconds to wait for resources") parser.add_option("-v", dest="verbose", action="store_true", help="enable verbose logging") opts, args = parser.parse_args() if opts.verbose: enable("qpid", DEBUG) else: enable("qpid", WARN) if args: patterns = args else: patterns = ["*"] conn = Connection.establish(opts.broker) if opts.release: request_type = "release" candidate_status = BUSY candidate_owner = opts.owner else: request_type = "reserve" candidate_status = FREE candidate_owner = None class Requester(Dispatcher): def __init__(self): self.agents = {} self.requests = set() self.outstanding = set() def agent_status(self, id): status, owner = self.agents[id] if owner: return "%s %s(%s)" % (id, status, owner) else: return "%s %s" % (id, status) def correlation(self, cid): self.requests.add(cid) self.outstanding.add(cid) def ignored(self, msg): return msg.properties.get("type") not in ("status", "empty") or \ msg.correlation_id not in self.requests def do_status(self, msg): id, status, owner = get_status(msg) self.agents[id] = (status, owner) if opts.status: print self.agent_status(id) def do_empty(self, msg): print "no matching resources" def candidates(self, candidate_status, candidate_owner): for id, (status, owner) in self.agents.items(): if status == candidate_status and owner == candidate_owner: yield id def dispatch(self, msg): result = Dispatcher.dispatch(self, msg) count = msg.properties.get("count") sequence = msg.properties.get("sequence") if count and sequence == count: self.outstanding.discard(msg.correlation_id) return result try: ssn = conn.session() rcv = ssn.receiver(opts.address, capacity=10) snd = ssn.sender(opts.address) correlation_id = str(uuid4()) if opts.discover: properties = {"type": "discover", "identity": patterns} content = None else: properties = {"type": "query"} content = {"identity": patterns} snd.send(Message(reply_to = opts.address, correlation_id = correlation_id, properties = properties, content = content)) req = Requester() req.correlation(correlation_id) start = time.time() ellapsed = 0 requested = set() discovering = opts.discover while ellapsed <= opts.timeout and (discovering or req.outstanding): try: msg = rcv.fetch(opts.timeout - ellapsed) ssn.acknowledge(msg) except Empty: continue finally: ellapsed = time.time() - start req.dispatch(msg) if not opts.status: if len(requested) < opts.number: for cid in req.candidates(candidate_status, candidate_owner): if cid in requested: continue req_msg = Message(reply_to = opts.address, 
correlation_id = str(uuid4()), properties = {"type": request_type, "identity": [cid]}, content = {"owner": opts.owner}) if not requested: print "requesting %s:" % request_type, print cid, sys.stdout.flush() req.correlation(req_msg.correlation_id) snd.send(req_msg) requested.add(cid) else: discovering = False if requested: print owners = {} for id in requested: st, ow = req.agents[id] if not owners.has_key(ow): owners[ow] = [] owners[ow].append(id) keys = list(owners.keys()) keys.sort() for k in keys: owners[k].sort() v = ", ".join(owners[k]) if k is None: print "free: %s" % v else: print "owner %s: %s" % (k, v) elif req.agents and not opts.status: print "no available resources" if req.outstanding: print "request timed out" except KeyboardInterrupt: pass finally: conn.close() qpid-python-0.22/python/examples/reservations/inventory0000755000175000017500000000557211357642123021654 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import optparse, traceback from qpid.messaging import * from qpid.log import enable, DEBUG, WARN from common import * parser = optparse.OptionParser(usage="usage: %prog [options]", description="machine inventory agent") parser.add_option("-b", "--broker", default="localhost", help="connect to specified BROKER (default %default)") parser.add_option("-d", "--database", help="database file for persistent machine status") parser.add_option("-a", "--address", default="reservations", help="address for reservation requests") parser.add_option("-v", dest="verbose", action="store_true", help="enable verbose logging") opts, args = parser.parse_args() if opts.verbose: enable("qpid", DEBUG) else: enable("qpid", WARN) conn = Connection.establish(opts.broker, reconnect=True, reconnect_interval=1) class Inventory(Dispatcher): def __init__(self): self.agents = {} def running(self): return True def do_status(self, msg): id, status, owner = get_status(msg) self.agents[id] = (status, owner) def do_query(self, msg): patterns = msg.content["identity"] result = [] for id, (status, owner) in self.agents.items(): if match(id, patterns): r = Message(properties = { "type": "status" }, content = { "identity": id, "status": status, "owner": owner }) result.append((msg.reply_to, r)) continue if not result: result.append((msg.reply_to, Message(properties = {"type": "empty"}))) return result def ignored(self, msg): type = msg.properties.get("type") return type not in ("status", "query") try: ssn = conn.session() rcv = ssn.receiver(opts.address, capacity = 10) snd = ssn.sender(opts.address) snd.send(Message(reply_to = opts.address, properties = {"type": "discover", "identity": ["*"]})) inv = Inventory() inv.run(ssn) except KeyboardInterrupt: pass finally: conn.close() 
qpid-python-0.22/python/examples/reservations/common.py0000644000175000017500000000415011357642123021522 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import traceback from fnmatch import fnmatch from qpid.messaging import * class Dispatcher: def unhandled(self, msg): print "UNHANDLED MESSAGE: %s" % msg def ignored(self, msg): return False def dispatch(self, msg): try: if self.ignored(msg): return () else: type = msg.properties.get("type") replies = getattr(self, "do_%s" % type, self.unhandled)(msg) if replies is None: return () else: return replies except: traceback.print_exc() return () def run(self, session): while self.running(): msg = session.next_receiver().fetch() replies = self.dispatch(msg) count = len(replies) sequence = 1 for to, r in replies: r.correlation_id = msg.correlation_id r.properties["count"] = count r.properties["sequence"] = sequence sequence += 1 try: snd = session.sender(to) snd.send(r) except SendError, e: print e finally: snd.close() session.acknowledge(msg) def get_status(msg): return msg.content["identity"], msg.content["status"], msg.content["owner"] FREE = "free" BUSY = "busy" def match(value, patterns): for p in patterns: if fnmatch(value, p): return True return False qpid-python-0.22/python/examples/api/0000755000175000017500000000000012151237730015702 5ustar mbambaqpid-python-0.22/python/examples/api/hello_xml0000755000175000017500000000411111430035574017611 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import sys from qpid.messaging import * broker = "localhost:5672" connection = Connection(broker) try: connection.open() session = connection.session() # Set up the receiver query = """ let $w := ./weather return $w/station = 'Raleigh-Durham International Airport (KRDU)' and $w/temperature_f > 50 and $w/temperature_f - $w/dewpoint > 5 and $w/wind_speed_mph > 7 and $w/wind_speed_mph < 20 """ # query="./weather" address = """ xml; { create: always, node:{ type: queue }, link: { x-bindings: [{ exchange: xml, key: weather, arguments: { xquery: %r} }] } } """ % query receiver = session.receiver(address) # Send an observation observations = """ Raleigh-Durham International Airport (KRDU) 16 70 35 """ message = Message(subject="weather", content=observations) sender = session.sender("xml") sender.send(message) # Retrieve matching message from the receiver and print it message = receiver.fetch(timeout=1) print message.content session.acknowledge() except MessagingError,m: print m connection.close() qpid-python-0.22/python/examples/api/spout0000755000175000017500000000764412142746464017026 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import optparse, time from qpid.messaging import * from qpid.util import URL from qpid.log import enable, DEBUG, WARN def nameval(st): idx = st.find("=") if idx >= 0: name = st[0:idx] value = st[idx+1:] else: name = st value = None return name, value parser = optparse.OptionParser(usage="usage: %prog [options] ADDRESS [ CONTENT ... 
]", description="Send messages to the supplied address.") parser.add_option("-b", "--broker", default="localhost", help="connect to specified BROKER (default %default)") parser.add_option("-r", "--reconnect", action="store_true", help="enable auto reconnect") parser.add_option("-i", "--reconnect-interval", type="float", default=3, help="interval between reconnect attempts") parser.add_option("-l", "--reconnect-limit", type="int", help="maximum number of reconnect attempts") parser.add_option("-c", "--count", type="int", default=1, help="stop after count messages have been sent, zero disables (default %default)") parser.add_option("-t", "--timeout", type="float", default=None, help="exit after the specified time") parser.add_option("-I", "--id", help="use the supplied id instead of generating one") parser.add_option("-S", "--subject", help="specify a subject") parser.add_option("-R", "--reply-to", help="specify reply-to address") parser.add_option("-P", "--property", dest="properties", action="append", default=[], metavar="NAME=VALUE", help="specify message property") parser.add_option("-M", "--map", dest="entries", action="append", default=[], metavar="KEY=VALUE", help="specify map entry for message body") parser.add_option("-v", dest="verbose", action="store_true", help="enable logging") opts, args = parser.parse_args() if opts.verbose: enable("qpid", DEBUG) else: enable("qpid", WARN) if opts.id is None: spout_id = str(uuid4()) else: spout_id = opts.id if args: addr = args.pop(0) else: parser.error("address is required") content = None if args: text = " ".join(args) else: text = None if opts.entries: content = {} if text: content["text"] = text for e in opts.entries: name, val = nameval(e) content[name] = val else: content = text conn = Connection(opts.broker, reconnect=opts.reconnect, reconnect_interval=opts.reconnect_interval, reconnect_limit=opts.reconnect_limit) try: conn.open() ssn = conn.session() snd = ssn.sender(addr) count = 0 start = time.time() while (opts.count == 0 or count < opts.count) and \ (opts.timeout is None or time.time() - start < opts.timeout): msg = Message(subject=opts.subject, reply_to=opts.reply_to, content=content) msg.properties["spout-id"] = "%s:%s" % (spout_id, count) for p in opts.properties: name, val = nameval(p) msg.properties[name] = val snd.send(msg) count += 1 print msg except SendError, e: print e except KeyboardInterrupt: pass conn.close() qpid-python-0.22/python/examples/api/server0000755000175000017500000000574411403732436017152 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import optparse, sys, traceback from qpid.messaging import * from qpid.util import URL from subprocess import Popen, STDOUT, PIPE from qpid.log import enable, DEBUG, WARN parser = optparse.OptionParser(usage="usage: %prog [options] ADDRESS ...", description="handle requests from the supplied address.") parser.add_option("-b", "--broker", default="localhost", help="connect to specified BROKER (default %default)") parser.add_option("-r", "--reconnect", action="store_true", help="enable auto reconnect") parser.add_option("-i", "--reconnect-interval", type="float", default=3, help="interval between reconnect attempts") parser.add_option("-l", "--reconnect-limit", type="int", help="maximum number of reconnect attempts") parser.add_option("-v", dest="verbose", action="store_true", help="enable logging") opts, args = parser.parse_args() if opts.verbose: enable("qpid", DEBUG) else: enable("qpid", WARN) if args: addr = args.pop(0) else: parser.error("address is required") conn = Connection(opts.broker, reconnect=opts.reconnect, reconnect_interval=opts.reconnect_interval, reconnect_limit=opts.reconnect_limit) def dispatch(msg): msg_type = msg.properties.get("type") if msg_type == "shell": proc = Popen(msg.content, shell=True, stderr=STDOUT, stdin=PIPE, stdout=PIPE) output, _ = proc.communicate() result = Message(output) result.properties["exit"] = proc.returncode elif msg_type == "eval": try: content = eval(msg.content) except: content = traceback.format_exc() result = Message(content) else: result = Message("unrecognized message type: %s" % msg_type) return result try: conn.open() ssn = conn.session() rcv = ssn.receiver(addr) while True: msg = rcv.fetch() response = dispatch(msg) snd = None try: snd = ssn.sender(msg.reply_to) snd.send(response) except SendError, e: print e if snd is not None: snd.close() ssn.acknowledge() except ReceiveError, e: print e except KeyboardInterrupt: pass conn.close() qpid-python-0.22/python/examples/api/statistics.py0000644000175000017500000000754112110435147020452 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import time TS = "ts" TIME_SEC = 1000000000 MILLISECOND = 1000 class Statistic: def message(self, msg): return def report(self): return "" def header(self): return "" class Throughput(Statistic): def __init__(self): self.messages = 0 self.started = False def message(self, m): self.messages += 1 if not self.started: self.start = time.time() self.started = True def header(self): return "tp(m/s)" def report(self): if self.started: elapsed = time.time() - self.start return str(int(self.messages/elapsed)) else: return "0" class ThroughputAndLatency(Throughput): def __init__(self): Throughput.__init__(self) self.total = 0.0 self.min = float('inf') self.max = -float('inf') self.samples = 0 def message(self, m): Throughput.message(self, m) if TS in m.properties: self.samples+=1 latency = MILLISECOND * (time.time() - float(m.properties[TS])/TIME_SEC) if latency > 0: self.total += latency if latency < self.min: self.min = latency if latency > self.max: self.max = latency def header(self): # Throughput.header(self) return "%s\tl-min\tl-max\tl-avg" % Throughput.header(self) def report(self): output = Throughput.report(self) if (self.samples > 0): output += "\t%.2f\t%.2f\t%.2f" %(self.min, self.max, self.total/self.samples) return output # Report batch and overall statistics class ReporterBase: def __init__(self, batch, wantHeader): self.batchSize = batch self.batchCount = 0 self.headerPrinted = not wantHeader self.overall = None self.batch = None def create(self): return # Count message in the statistics def message(self, m): if self.overall == None: self.overall = self.create() self.overall.message(m) if self.batchSize: if self.batch == None: self.batch = self.create() self.batch.message(m) self.batchCount+=1 if self.batchCount == self.batchSize: self.header() print self.batch.report() self.create() self.batchCount = 0 # Print overall report. def report(self): if self.overall == None: self.overall = self.create() self.header() print self.overall.report() def header(self): if not self.headerPrinted: if self.overall == None: self.overall = self.create() print self.overall.header() self.headerPrinted = True class Reporter(ReporterBase): def __init__(self, batchSize, wantHeader, Stats): ReporterBase.__init__(self, batchSize, wantHeader) self.__stats = Stats def create(self): ClassName = self.__stats.__class__ return ClassName() qpid-python-0.22/python/examples/api/hello0000755000175000017500000000251111430035574016733 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import sys from qpid.messaging import * if len(sys.argv)<2: broker = "localhost:5672" else: broker = sys.argv[1] if len(sys.argv)<3: address = "amq.topic" else: address = sys.argv[2] connection = Connection(broker) try: connection.open() session = connection.session() sender = session.sender(address) receiver = session.receiver(address) sender.send(Message("Hello world!")); message = receiver.fetch() print message.content session.acknowledge() except MessagingError,m: print m connection.close() qpid-python-0.22/python/examples/api/drain0000755000175000017500000000632511403732436016735 0ustar mbamba#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import optparse from qpid.messaging import * from qpid.util import URL from qpid.log import enable, DEBUG, WARN parser = optparse.OptionParser(usage="usage: %prog [options] ADDRESS ...", description="Drain messages from the supplied address.") parser.add_option("-b", "--broker", default="localhost", help="connect to specified BROKER (default %default)") parser.add_option("-c", "--count", type="int", help="number of messages to drain") parser.add_option("-f", "--forever", action="store_true", help="ignore timeout and wait forever") parser.add_option("-r", "--reconnect", action="store_true", help="enable auto reconnect") parser.add_option("-i", "--reconnect-interval", type="float", default=3, help="interval between reconnect attempts") parser.add_option("-l", "--reconnect-limit", type="int", help="maximum number of reconnect attempts") parser.add_option("-t", "--timeout", type="float", default=0, help="timeout in seconds to wait before exiting (default %default)") parser.add_option("-p", "--print", dest="format", default="%(M)s", help="format string for printing messages (default %default)") parser.add_option("-v", dest="verbose", action="store_true", help="enable logging") opts, args = parser.parse_args() if opts.verbose: enable("qpid", DEBUG) else: enable("qpid", WARN) if args: addr = args.pop(0) else: parser.error("address is required") if opts.forever: timeout = None else: timeout = opts.timeout class Formatter: def __init__(self, message): self.message = message self.environ = {"M": self.message, "P": self.message.properties, "C": self.message.content} def __getitem__(self, st): return eval(st, self.environ) conn = Connection(opts.broker, reconnect=opts.reconnect, reconnect_interval=opts.reconnect_interval, reconnect_limit=opts.reconnect_limit) try: conn.open() ssn = conn.session() rcv = ssn.receiver(addr) count = 0 while not opts.count or count < opts.count: try: msg = rcv.fetch(timeout=timeout) print opts.format % Formatter(msg) count += 1 ssn.acknowledge() except Empty: break except ReceiverError, e: print e except KeyboardInterrupt: pass conn.close() 
qpid-python-0.22/python/todo.txt0000644000175000017500000001136011337027252015023 0ustar mbambaKey: F = Functional PF = Partially Functional NR = Needs Additional Review ND = Needs Additional Design NF = Not Functional Connection: variables/configuration: - reconnect: F, NR, ND + reconnect functionality is done and the API semantics provided are ready for review + reconnect policies need to be finished, there is currently only one hardcoded reconnect policy (retry every three seconds), we need to define the pre-canned policies that we want to support and a means to configure them, as well as a very simple plugin/callback for defining ad-hoc policies + need to feed failover exchange into the reconnect policy + acks can be lost on reconnect + handle reconnect during commit/rollback - timeout: NF + some sort of timeout threshold for killing the connection methods: - open/__init__: F, ND + need to support kerberos + need a better way of supplying various kinds of configuration: - authentication info - transport specific configuration options, e.g - heartbeat - socket options - tcp-nodelay - multiple brokers - session: F, NR - connect: F, NR - disconnect: F, NR - connected: F, NR - close: F, NR, ND + currently there is no distinction between a "close" that does a complete handshake with the remote broker, and a "close" that reclaims resources, this means that close can fail with an exception, I don't like this as it is unclear to the user if there is a responsibility to do further cleanup in this case errors: - ConnectionError: F, NR + ConnectError F, NR + Disconnected F, NR - notification of disconnect? Session: methods: - sender: F, NR, ND + need to detail address options + need to define subject pattern semantics + consider providing convenience for sender/receiver caching + need to provide sync option, possibly change default - receiver: F, NR, ND + need to detail address options + need to define filter syntax/semantics + consider providing convenience for sender/receiver caching + need to provide sync option, possibly change default - acknowledge: F, NR - reject: NF - release: NF - commit: F, NR - rollback: F, NR - next_receiver: F, NR - close: F, ND + see comment on Connection.close errors: - SessionError: F, NR, ND + SendError: F, NR, ND + ReceiveError: F, NR, ND + should there be fatal/non fatal variants? Sender: methods: - pending: F, NR - send: F, NR - sync: F, NR, ND + currently this blocks until pending == 0, I'm thinking of renaming this to wait and adding a slightly richer interface that would let you wait for something like pending < n - close: F, NR errors: - SendError + InsufficientCapacity + need specific subhierarchy for certain conditions, e.g. no such queue Receiver: methods: - pending: F, NR - listen: F, ND + see comment on Session.fetch - fetch: F, NR, ND + explicit grant for receiver + changing capacity/prefetch to issue credit on ack rather than fetch return - sync/wait: NF - close: F, NR errors: - ReceiveError + Empty + need specific subhierarchy for certain conditions, e.g. no such queue Message: - standard message properties: F, NR, ND - map messages: F, NR + needs interop testing: NF + needs java impl: NF - list messages: F, NR, NI + needs interop testing: NF + needs java impl: NF - boxed types: NF Address: - syntax: F, NR + need to consider whitespace in name/subject + consider unquoted string concept - subject related changes, e.g. 
allowing patterns on both ends: NF - creating/deleting queues/exchanges F, NR + need to handle cleanup of temp queues/topics: F, NR + passthrough options for creating exchanges/queues: F, NR - integration with java: NF - queue browsing: F, NR - temporary queues: NF - xquery: NF Testing: - stress/soak testing for async: NF - stress/soak testing for reconnect: NF - interop testing: NF - multi session and multi connection client tests: NF Documentation: - api level docs largely present but need updating and elaboration - tutorial: NF Examples: - drain: F, NR - spout: F, NR - server: F, NR - client: NF - reservations: F, NR + code: F, NR + doc: NF - other examples, e.g. async? Miscellaneous: - standard ping-like (drain/spout) utilities for all clients: NF - caching of resolved addresses: F, NR - consider using separate session for query/deletion/creation of addresses qpid-python-0.22/specs/0000755000175000017500000000000012243755335013117 5ustar mbambaqpid-python-0.22/specs/NOTICE0000644000175000017500000000043711056012631014011 0ustar mbamba// ------------------------------------------------------------------ // NOTICE file corresponding to the section 4d of The Apache License, // Version 2.0, // ------------------------------------------------------------------ Apache Qpid Copyright 2006-2008 Apache Software Foundation qpid-python-0.22/specs/amqp0-9-qpid.stripped.xml0000644000175000017500000011030712142746464017614 0ustar mbamba qpid-python-0.22/specs/amqp0-8.stripped.xml0000644000175000017500000007545712142746464016700 0ustar mbamba qpid-python-0.22/specs/amqp0-9-1.stripped.xml0000644000175000017500000005044111271404220017000 0ustar mbamba qpid-python-0.22/specs/apache-filters.xml0000644000175000017500000002546011754454623016541 0ustar mbamba

Versions of AMQP prior to 1.0 prescribed a model of Exchanges and Queues, where Queues were bound to Exchanges with a binding key whose meaning depended upon the type of the Exchange. In order to allow a consistent mechanism for addressing legacy AMQP Exchanges over AMQP 1.0 the following filter types are defined. Use of an Exchange as an address for a Source thus can be seen as equivalent to constructing exclusive queues bound to an Exchange in legacy AMQP versions.

Containers which support the filters that are defined in this section should advertise the capability APACHE.ORG:LEGACY_AMQP_EXCHANGE_FILTERS in their connection capabilities when sending the open performative, and MUST provide this capability on sources supporting these filter types.

The legacy-amqp-direct-binding filter consists of a described string value. The filter matches a message if and only if the described string value exactly matches the subject field of the Properties section of the message being evaluated. If the message has no Properties section, or if the subject field of the Properties section is not set, then the legacy-amqp-direct-binding filter does not match.

The legacy-amqp-topic-binding filter consists of a described string value. The value described by the type is interpreted as a pattern to match against the subject field of the Properties section of the message being evaluated.

  • The pattern is formed using zero or more tokens, with each token delimited by the "." character. The tokens "#" and "*" have special meanings.
  • The token consisting of the single character "*" matches a single word in the subject field.
  • The token consisting of the single character "#" matches zero or more words in the subject field.

Thus the filter value "*.stock.#" would match the subjects "usd.stock" and "eur.stock.db" but not "stock.nasdaq".

If the message has no Properties section, or if the subject field of the Properties section is not set, then the legacy-amqp-topic-binding filter matches only if the value of the filter is a single "#".
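
A minimal Python sketch (not part of this distribution) of the topic-binding matching rule described above; topic_match is a hypothetical helper, and an unset subject is represented here as None:

def topic_match(pattern, subject):
    # An unset subject matches only the pattern "#".
    if subject is None:
        return pattern == "#"
    def walk(p, s):
        if not p:
            return not s
        head, rest = p[0], p[1:]
        if head == "#":
            # "#" matches zero or more words.
            return any(walk(rest, s[i:]) for i in range(len(s) + 1))
        # "*" matches exactly one word; any other token must match literally.
        return bool(s) and (head == "*" or head == s[0]) and walk(rest, s[1:])
    return walk(pattern.split("."), subject.split("."))

# e.g. topic_match("*.stock.#", "usd.stock") and topic_match("*.stock.#", "eur.stock.db")
# are True, while topic_match("*.stock.#", "stock.nasdaq") is False.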

The legacy-amqp-headers-binding filter consists of a described map value. The map value described by the type is interpreted as a pattern to match against the application-properties section of the message being evaluated. The map has the same restriction as the application-properties section, namely that the keys of this map are restricted to be of type string (which excludes the possibility of a null key) and the values are restricted to be of simple types only, that is, excluding map, list, and array types.

The key "x-match" in the described map has special meaning. This key MUST map to the symbolic value "any" or the symbolic value "all" within the described map. All other keys which begin "x-" MUST be ignored by the source when evaluating. If the value for "x-match" is "all" then all other valid key-value pairs in the map MUST match with an entry with the same key in the application-properties section. If the value for "x-match" is "any" then the filter will accept the message if at least one key-value pair matches the equivalent key value pair in the application-properties section.

A key-value pair in the filter's map matches a key-value pair in the application-properties section if the keys are identical (including the same type), or if the value in the filter map for the key is null.
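
A minimal Python sketch (not part of this distribution) of the "x-match" evaluation described above, using plain dicts for the filter map and the application-properties section. It follows the usual headers-exchange interpretation that a pair matches when the key is present and either the filter value is null (None here) or the two values are equal; the strict type-identity requirement is not modelled:

def headers_match(filter_map, application_properties):
    # Keys beginning "x-" (including "x-match" itself) are not matched as pairs.
    pairs = [(k, v) for k, v in filter_map.items() if not k.startswith("x-")]
    def pair_matches(key, value):
        return key in application_properties and \
            (value is None or application_properties[key] == value)
    if filter_map.get("x-match") == "any":
        return any(pair_matches(k, v) for k, v in pairs)
    # "x-match" is otherwise required to be "all": every remaining pair must match.
    return all(pair_matches(k, v) for k, v in pairs)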

The Java Message Service defines two types of message filtering: first, the ability to exclude from a subscription messages sent by the same connection; second, a more general filtering syntax known as "selectors", based on an SQL-like syntax.

AMQP filter extensions through which these two types of filtering may be achieved are defined below. Their use, though motivated by support for JMS, is not restricted to JMS.

A message will be accepted by the simple-no-local-filter if and only if the message was originally sent to the container of the source on a separate connection from that which is currently receiving from the source.

Containers which support this filter should advertise the capability APACHE.ORG:NO_LOCAL in their connection capabilities when sending the open performative, and MUST provide this capability on sources supporting these filter types.

The Java Message Service "selector" defines an SQL like syntax for filtering messages. The selector filters based on the values of "headers" and "properties". The selector-filter uses the selector as defined by JMS but with the names of JMS headers translated into their AMQP equivalents. The defined JMS headers can be mapped to equivalent fields within the AMQP message sections:

JMS Header Name   | AMQP 1.0 Field
==================|====================================================
JMSCorrelationID  | correlation-id field of properties section
JMSDeliveryMode   | durable field of header section
JMSDestination    | to field of the properties section
JMSExpiration     | absolute-expiry-time of properties section
JMSMessageID      | message-id of properties section
JMSPriority       | priority field of header section
JMSRedelivered    | delivery-count > 0 in header section
JMSReplyTo        | reply-to in properties section
JMSTimestamp      | creation-time of properties section
JMSType           | annotation jms-type in message-annotations section

When encoding the selector string on the wire, these JMS header names should be translated to amqp.field_name where field_name is the appropriate AMQP 1.0 field named in the table above, with the hyphen replaced by an underscore. For example, the selector: "JMSCorrelationID = 'abc' AND color = 'blue' AND weight > 2500" would be transferred over the wire as: "amqp.correlation_id = 'abc' AND color = 'blue' AND weight > 2500"
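
A minimal Python sketch (not part of this distribution) of the header-name translation described above. The mapping dict follows the table; JMSRedelivered and JMSType are omitted because, per the table, they translate to an expression on delivery-count and to a message annotation rather than to a simple field reference. A real translator would also need to avoid rewriting names inside string literals, which this sketch ignores:

import re

JMS_TO_AMQP = {
    "JMSCorrelationID": "amqp.correlation_id",
    "JMSDeliveryMode":  "amqp.durable",
    "JMSDestination":   "amqp.to",
    "JMSExpiration":    "amqp.absolute_expiry_time",
    "JMSMessageID":     "amqp.message_id",
    "JMSPriority":      "amqp.priority",
    "JMSReplyTo":       "amqp.reply_to",
    "JMSTimestamp":     "amqp.creation_time",
}

def translate_selector(selector):
    # Replace whole-word JMS header names with their amqp.field_name equivalents;
    # any other identifier (an application property) is left untouched.
    return re.sub(r"\bJMS\w+\b",
                  lambda m: JMS_TO_AMQP.get(m.group(0), m.group(0)),
                  selector)

# translate_selector("JMSCorrelationID = 'abc' AND color = 'blue' AND weight > 2500")
# returns "amqp.correlation_id = 'abc' AND color = 'blue' AND weight > 2500"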

The "properties" of the JMS message are equivalent to the AMQP application-properties section. Thus a reference to a property Foo in a message selector would be evaluated as the value associated with the key "Foo" (if present) in the application-properties section.

The operands of the JMS selector are defined in terms of the types available within JMS. When evaluated against the application-properties section, the values within that section MUST be evaluated according to the following type mapping.

AMQP Type   | JMS Selector Type
============|===================
null        | null
boolean     | boolean
ubyte       | short
ushort      | int
uint        | long
ulong       | long
byte        | byte
short       | short
int         | int
long        | long
float       | float
double      | double
decimal32   | double
decimal64   | double
decimal128  | double
char        | char
timestamp   | long
uuid        | byte[16]
binary      | byte[]
string      | String
symbol      | String

Containers which support this filter should advertise the capability APACHE.ORG:SELECTOR in their connection capabilities when sending the open performative, and MUST provide this capability on sources supporting these filter types.

qpid-python-0.22/specs/amqp.0-10-qpid-errata.stripped.xml0000644000175000017500000013623412142746464021225 0ustar mbamba
qpid-python-0.22/specs/amqp.xsl0000644000175000017500000007541211754454623014620 0ustar mbamba AMQP <xsl:call-template name="initCap"><xsl:with-param name="input" select="@name"/></xsl:call-template>

AMQP

Table of Contents

#section- #doc-        #-             

section-

doc-

doc-

pre #section- #type- #doc- .xml

type-

signature : Encodings: composite

type-

composite fields mandatory optional []

type-

compositepossible values anchorchoice--
<type name="" class="" source="" provides=""/> </type>
<descriptor name="" code=""/> <choice =""/> <field =""/> </field> <error =""/> code : fixed-width, byte value variable-width, byte size
#type-
TypeEncodingCodeCategoryDescription
#type- /
error:

definition-

signature :
. .. . * #type- .xml #type- #choice-- .xml #choice--
qpid-python-0.22/specs/amqp.0-10.stripped.xml0000644000175000017500000013604712142746464017020 0ustar mbamba
qpid-python-0.22/specs/amqp-errata.0-9.xml0000644000175000017500000000332510577560477016373 0ustar mbamba This flag tells the server how to react if the message cannot be routed to a queue. If this flag is set, the server will return an unroutable message with a Return method. If this flag is zero, the server silently drops the message. qpid-python-0.22/specs/amqp0-9.stripped.xml0000644000175000017500000010556712142746464016675 0ustar mbamba qpid-python-0.22/specs/management-schema.xml0000644000175000017500000010561412121653211017203 0ustar mbamba This class represents an inter-broker connection. qpid-python-0.22/specs/amqp-nogen.0-9.xml0000644000175000017500000000261410577560477016223 0ustar mbamba qpid-python-0.22/specs/amqp0-8-qpid.stripped.xml0000644000175000017500000007767512142746464017637 0ustar mbamba qpid-python-0.22/specs/amqp-dtx-preview.0-9.xml0000644000175000017500000012171710651117435017362 0ustar mbamba The rollback was caused by an unspecified reason. A transaction branch took too long. The transaction branch may have been heuristically completed. The transaction branch has been heuristically committed. The transaction branch has been heuristically rolled back. The transaction branch has been heuristically committed and rolled back. The transaction branch was read-only and has been committed. Normal execution. An Xid uniquely identifies a transaction branch. Xid contains a format identifier, two length fields and a data field: format_id long gtrid_length octet bqual_length octet data format_id is an implementation specific format identifier the data field is a sequence of octets of at most 128 bytes containing the txn id and the branch id gtrid_length field indicates how many bytes of this form the transaction id bqual_length field indicates how many bytes of this form the branch id The sum of the two lengths must equal the length of the data field This class is part of the X-Open XA distributed transaction protocol support. It allows a channel to be selected for use with distributed transactions and the transactional boundaries for work on that channel to be demarcated. dtx-demarcation = C:SELECT S:SELECT-OK *demarcation demarcation = C:START S:START-OK C:END S:END-OK Access-tickets are propagated with XA association methods with the aim of restricting which users are allowed to control which transactions. The server MAY restrict transaction association to a particular identity. Enabling XA transaction support on a channel implies that the server MUST manage transactions demarcated by start-end blocks. That is to say that on this XA-enabled channel, work undergone within transactional blocks is performed on behalf a transaction branch whereas work performed outside of transactional blocks is NOT transactional. This method sets the channel to use distributed transactions. The client must use this method at least once on a channel before using XA demarcation operations. This method confirms to the client that the channel was successfully set to use distributed transactions. This method is called when messages should be produced and consumed on behalf a transaction branch identified by xid. If the method is invoked in an improper context (see class grammar) then the server MUST raise a channel exception with reply code 503 (command invalid) If neither join nor resume is specified is specified and the transaction branch specified by xid has previously been seen then the server MUST raise a channel exception with reply code 530 (not allowed). 
If join and resume are specified then the server MUST raise a channel exception with reply code 503 (command invalid) Access-ticket granted by the server for a specific realm. Specifies the xid of the transaction branch to be started. If Xid is already known by the broker then the server MUST raise a channel exception with reply code 530 (not allowed). Indicate that the start applies to joining a transaction previously seen. If the broker does not support join the server MUST raise a channel exception with reply code 540 (not implemented). Indicate that the start applies to resuming a suspended transaction branch specified. This method confirms to the client that the transaction branch is started or specify the error condition. xa-ok: Normal execution. xa-rbrollback: The broker marked the transaction branch rollback-only for an unspecified reason. This method is called when the work done on behalf a transaction branch finishes or needs to be suspended. If the method is invoked in an improper context (see class grammar) then the server MUST raise a channel exception with reply code 503 (command invalid) If suspend and fail are specified then the server MUST raise a channel exception with reply code 503 (command invalid) If an error occurs in ending the transaction branch then the server MUST raise a channel exception with reply code 541 (internal error) If neither fail nor suspend are specified then the portion of work has completed successfully Access-ticket granted by the server for a specific realm. Specifies the xid of the transaction branch to be ended. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). Indicates that the portion of work has failed otherwise the portion of work has completed successfully. If fail is specified then the transaction should be marked as rollback-only. Indicates that the transaction branch is temporarily suspended in an incomplete state. The transaction context is in a suspended state and must be resumed via the start method with resume specified. This method confirms to the client that the transaction branch is ended or specify the error condition. xa-ok: Normal execution. xa-rbrollback: The broker marked the transaction branch rollback-only for an unspecified reason. xa-rbtimeout: The work represented by this transaction branch took too long. This class is part of the X-Open XA distributed transaction protocol support. It allows the transaction manager to coordinate transaction outcomes. dtx-coordination = *coordination coordination = command / outcome / recovery command = C:SET-TIMEOUT S:SET-TIMEOUT-OK / C:GET-TIMEOUT S:GET-TIMEOUT-OK outcome = one-phase-commit / one-phase-rollback / two-phase-commit / two-phase-rollback one-phase-commit = C:COMMIT S:COMMIT-OK one-phase-rollback = C:ROLLBACK S:ROLLBACK-OK two-phase-commit = C:PREPARE S:PREPARE-OK C:COMMIT S:COMMIT-OK two-phase-rollback = C:PREPARE S:PREPARE-OK C:ROLLBACK S:ROLLBACK-OK recovery = C:RECOVER S:RECOVER-OK *recovery-outcome recovery-outcome = one-phase-commit / one-phase-rollback / C:FORGET S:FORGET-OK Access-tickets are propagated with XA demarcation methods with the aim of restricting which users are allowed to control which transactions. The server MAY restrict transaction coordination to a particular identity. This method commits the work associated with xid. Any produced messages are made available and any consumed messages are discarded. 
If an error occurs in committing the transaction branch then the server MUST raise a channel exception with reply code 541 (internal error) If the method is invoked in an improper context (see class grammar) then the server MUST raise a channel exception with reply code 503 (command invalid) Access-ticket granted by the server for a specific realm. Specifies the Xid of the transaction branch to be committed. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). If this method is called when Xid is still associated with a channel then the server MUST raise a channel exception with reply code 503 (command invalid) When set then one-phase commit optimization is used. This method confirms to the client that the transaction branch is committed or specify the error condition. xa-ok: Normal execution, xa-heurhaz: Due to some failure, the work done on behalf of the specified transaction branch may have been heuristically completed. xa-heurcom: Due to a heuristic decision, the work done on behalf of the specified transaction branch was committed. xa-heurrb: Due to a heuristic decision, the work done on behalf of the specified transaction branch was rolled back. xa-heurmix: Due to a heuristic decision, the work done on behalf of the specified transaction branch was partially committed and partially rolled back. xa-rbrollback: The broker marked the transaction branch rollback-only for an unspecified reason. xa-rbtimeout: The work represented by this transaction branch took too long. This method is called to forget about a heuristically completed transaction branch. If an error occurs in forgetting the transaction branch then the server MUST raise a channel exception with reply code 541 (internal error) If the method is invoked in an improper context (see class grammar) then the server MUST raise a channel exception with reply code 503 (command invalid) Access-ticket granted by the server for a specific realm. Specifies the xid of the transaction branch to be forgotten. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). If this method is called when Xid is still associated with a channel then the server MUST raise a channel exception with reply code 503 (command invalid) This method confirms to the client that the transaction branch is forgotten or specify the error condition. This method obtains the current transaction timeout value in seconds. If setTimeout was not used prior to invoking this method, the return value is the default timeout; otherwise, the value used in the previous setTimeout call is returned. Specifies the Xid of the transaction branch for getting the timeout. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). If an error occurs in setting the transaction timeout then the server MUST raise a channel exception with reply code 541 (internal error) This method returns the current transaction timeout value in seconds. The current transaction timeout value in seconds. This method prepares for commitment any message produced or consumed on behalf of xid. If an error occurs in preparing the transaction branch then the server MUST raise a channel exception with reply code 541 (internal error). 
The specified Xid may or may not have been prepared. If the method is invoked in an improper context (see class grammar) then the server MUST raise a channel exception with reply code 503 (command invalid) Once this method successfully returns it is guaranteed that the transaction branch may be either committed or rolled back regardless of failures. The knowledge of xid cannot be erased before commit or rollback complete the branch. Access-ticket granted by the server for a specific realm. Specifies the Xid of the transaction branch that can be prepared. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). If this method is called when Xid is still associated with a channel then the server MUST raise a channel exception with reply code 503 (command invalid) This method confirms to the client that the transaction branch is prepared or specify the error condition. xa-ok: Normal execution. xa-rdonly: The transaction branch was read-only and has been committed. xa-rbrollback: The broker marked the transaction branch rollback-only for an unspecified reason. xa-rbtimeout: The work represented by this transaction branch took too long. This method is called to obtain a list of transaction branches that are in a prepared or heuristically completed state. If an error occurs in recovering then the server MUST raise a channel exception with reply code 541 (internal error) If this endscan is used in conjunction with startscan then a single call starts and then ends a scan. If none of endscan and startscan are set then a recovery scan must already be started otherwise the server MUST raise a channel exception with reply code 503 (command invalid) Access-ticket granted by the server for a specific realm. Indicates that recovery scan should start. If a recovery scan is already open, the effect is as if the recovery scan were ended and then restarted. Indicates that the recovery scan should end after returning the Xids. Returns to the client a table of transaction Xids that are in a prepared or heuristically completed state. table containing transaction Xids that are in a prepared or heuristically completed state. This method rolls back the work associated with xid. Any produced messages are discarded and any consumed messages are re-enqueued. If an error occurs in rolling back the transaction branch then the server MUST raise a channel exception with reply code 541 (internal error) If the method is invoked in an improper context (see class grammar) then the server MUST raise a channel exception with reply code 503 (command invalid) Access-ticket granted by the server for a specific realm. Specifies the Xid of the transaction branch that can be rolled back. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). If this method is called when Xid is still associated with a channel then the server MUST raise a channel exception with reply code 503 (command invalid) This method confirms to the client that the transaction branch is rolled back or specify the error condition. xa-ok: Normal execution, xa-heurhaz: Due to some failure, the work done on behalf of the specified transaction branch may have been heuristically completed. xa-heurcom: Due to a heuristic decision, the work done on behalf of the specified transaction branch was committed. 
xa-heurrb: Due to a heuristic decision, the work done on behalf of the specified transaction branch was rolled back. xa-heurmix: Due to a heuristic decision, the work done on behalf of the specified transaction branch was partially committed and partially rolled back. xa-rbrollback: The broker marked the transaction branch rollback-only for an unspecified reason. xa-rbtimeout: The work represented by this transaction branch took too long. Sets the specified transaction branch timeout value in seconds. If an error occurs in setting the transaction timeout then the server MUST raise a channel exception with reply code 541 (internal error) Once set, this timeout value is effective until this method is reinvoked with a different value. A value of zero resets the timeout value to the default value. Access-ticket granted by the server for a specific realm. Specifies the Xid of the transaction branch for setting the timeout. If Xid is unknown (the transaction branch has not been started or has already been ended) then the server MUST raise a channel exception with reply code 404 (not found). The transaction timeout value in seconds. This method confirms that the timeout has been set. This is a utility class for querying and exchange about its bindings to queues. This method is used to request information on the bindings to a particular exchange. That information is conveyed in a query-ok method. A valid ticket should be provided. The name of the exchange for which binding information is being requested. If not specified explicitly the default exchange is implied. If populated then determine whether the given queue is bound to the exchange. If populated defines the routing key of the binding of interest, if not populated the request will ignore the routing key on bindings when searching for a match. If populated defines the arguments of the binding of interest if not populated the request will ignore the arguments on bindings when searching for a match This method is used in response to a query and conveys information on the bindings to a particular exchange. If set, the exchange for which information was requested is not known. If set, the queue specified is not known. A bit which if set indicates that no binding was found from the specified exchange to the specified queue. A bit which if set indicates that no binding was found from the specified exchange with the specified routing key. A bit which if set indicates that no binding was found from the specified exchange with the specified arguments. This method is used to request information on a particular exchange. That information is conveyed by an query-ok method. A valid ticket should be provided. The name of the exchange for which information is requested. If not specified explicitly the default exchange is implied. This method is used in response to a query request and conveys information on a particular exchange. The type of the exchange. Will be empty if the exchange is not found. The durability of the exchange, i.e. if set the exchange is durable. Will not be set if the exchange is not found. If set, the exchange for which information was requested is not known. A set of properties of the exchange whose syntax and semantics depends on the server implementation. Will be empty if the exchange is not found. This class allows for efficiently communicating information about completion of processing. The low-water mark for executed command-ids. 
All ids below this mark have been executed; above this mark, there are gaps containing unexecuted command ids (i.e. discontinuous). By definition, the first id above this mark (if it exists) is an unexecuted command-id. qpid-python-0.22/specs/LICENSE0000644000175000017500000002613612142746464014135 0ustar mbamba Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
   Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks.

   This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

   Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability.

   In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

   While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

   To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

qpid-python-0.22/specs/cluster.0-8.xml0000644000175000017500000000334411026246517015624 0ustar mbamba

An extension that allows brokers to communicate in order to provide a clustered service to clients.
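Returning to the command-id mark described at the top of this section (before the specs/LICENSE entry): the following is a minimal, hypothetical Python sketch of that bookkeeping, not code from this archive. It assumes the mark is inclusive (every id up to and including the mark has executed), and the names ExecutedTracker, execute, and is_executed are illustrative only; the library's own ranged-set machinery is not reproduced here.

class ExecutedTracker:
  """Toy model of the mark invariant: every id <= mark has been executed,
  ids above the mark may contain gaps, and mark + 1 is always unexecuted."""

  def __init__(self):
    self.mark = -1          # every id at or below this value has executed
    self.above = set()      # executed ids above the mark (possibly discontinuous)

  def execute(self, command_id):
    # Record an executed id, then advance the mark across any run that is
    # now contiguous starting at mark + 1.
    self.above.add(command_id)
    while (self.mark + 1) in self.above:
      self.above.remove(self.mark + 1)
      self.mark += 1

  def is_executed(self, command_id):
    return command_id <= self.mark or command_id in self.above

# Ids 0, 1 and 3 complete; 2 is still outstanding, so the mark stops at 1.
t = ExecutedTracker()
for cid in (0, 1, 3):
  t.execute(cid)
assert t.mark == 1 and t.is_executed(3) and not t.is_executed(2)
t.execute(2)                # the gap closes ...
assert t.mark == 3          # ... and the mark advances past 2 and 3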