oslo.messaging-1.3.0/0000775000175300017540000000000012316527535015611 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/tox.ini0000664000175300017540000000076612316527457017140 0ustar jenkinsjenkins00000000000000[tox] envlist = py26,py27,pep8 [testenv] setenv = VIRTUAL_ENV={envdir} OS_TEST_TIMEOUT=30 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = python setup.py testr --slowest --testr-args='{posargs}' [testenv:pep8] commands = flake8 [testenv:cover] setenv = VIRTUAL_ENV={envdir} commands = python setup.py testr --coverage [testenv:venv] commands = {posargs} [flake8] show-source = True exclude = .tox,dist,doc,*.egg,build,__init__.py builtins = _ oslo.messaging-1.3.0/setup.py0000664000175300017540000000141512316527457017327 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True) oslo.messaging-1.3.0/LICENSE0000664000175300017540000002665212316527457016634 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. oslo.messaging-1.3.0/PKG-INFO0000664000175300017540000000172012316527535016706 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: oslo.messaging Version: 1.3.0 Summary: Oslo Messaging API Home-page: https://launchpad.net/oslo Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Oslo Messaging Library ====================== The Oslo messaging API supports RPC and notifications over a number of different messaging transports. 
See also: `Library Documentation `_ Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 oslo.messaging-1.3.0/CONTRIBUTING.rst0000664000175300017540000000103212316527457020251 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in the "If you're a developer, start here" section of this page: http://wiki.openstack.org/HowToContribute Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://wiki.openstack.org/GerritWorkflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/oslo oslo.messaging-1.3.0/ChangeLog0000664000175300017540000003077112316527535017373 0ustar jenkinsjenkins00000000000000CHANGES ======= 1.3.0 ----- * Add release notes for 1.3.0 * Fix wrong parameter description in docstring * Fixed inconsistent EventletContextManagerSpawnTest failures * Use messaging_conf fixture configuration by default * Fixed possible pep8 failure due to pyflakes bug * Refactor AMQP message broker selection * Add unit test to check the order of Qpid hosts on reconnect * Clean up for qpid tests * Add kombu driver library to requirements.txt * Use driver's notify_send() method again * Remove vim header * Updated from global requirements * Fixed spelling error - runnung to running * Build log_handler documentation * Add release notes up to 1.3.0a9 1.3.0a9 ------- * Remove use of sslutils 1.3.0a8 ------- * Expose PublishErrorsHandler through oslo.messaging * notify listener: document the metadata callback parameter * Add missing data into the notif. 
endpoint callback * notification listener: add allow_requeue param * Adds unit test cases to impl_qpid * Do not leak _unique_id out of amqp drivers * Add multiple exchange per listerner in fake driver * Allow to requeue the notification message * Slow down Kombu reconnect attempts * Don't run python 3 tests by default * Gracefully handle consumer cancel notifications * Updated from global requirements * Convert to oslo.test * Add log_handler to oslo.messaging * Add a link to the docs from the README * Pass the session to QpidMessage constructor * User a more accurate max_delay for reconnects * Make the dispatcher responsible of the message ack * Don't reply to notification message * Abstract the acknowledge layer of a message * Implements notification listener and dispatcher * Switch over to oslosphinx * Improve help strings * Update ExpectedException handling * Ignore any egg and egg-info directories * Qpid: advance thru the list of brokers on reconnect * RabbitMQ: advance thru the list of brokers on reconnect 1.3.0a7 ------- * Make the dispatcher responsible to listen() * Allow fake driver to consume multiple topics * Allow different login methods to be used with kombu connections 1.3.0a6 ------- * Use stevedore's make_test_instance * Expose an entry point to list all config options * Fix test case name typo * Fix UnboundLocalError error 1.3.0a5 ------- * Fix help strings * Add release notes for 1.3.0a3 * python3: Switch to mox3 instead of mox * Remove dependencies on pep8, pyflakes and flake8 * Routing notifier 1.3.0a4 ------- * Removes use of timeutils.set_time_override * Fix spelling errors in comments * Fix test_notifier_logger for Python 3 * Minor Python 3 fixes * Remove copyright from empty files * Fix duplicate topic messages for Qpid topology=2 * Replace dict.iteritems() with six.iteritems() * Remove unused eventlet/greenlet from qpid/rabbit * fix test_rabbit for Python 3 * Fix try/except syntax for Python 3 * Fix exception deserialiation on Python 3 * Add Sample priority * sysnchronize oslo-incubator modules * Remove eventlet related code in amqp driver * Fix syntax of relative imports for Python3 * Updated from global requirements * Updated from global requirements * Unify different names between Python2 and Python3 * Replace data structures' attribute with six module * Avoid creating qpid connection twice in initialization * Use six.moves.queue instead of Queue * Add transport aliases * Remove the partial implementation of ack_on_error * Fixed misspellings of common words * Add release notes for 1.3.0a2 * Unify different names between Python2/3 with six.moves * Remove vim header * Ensure context type is handled when using to_dict * Refactors boolean returns 1.3.0a2 ------- * Simplify common base exception prototype * Properly reconnect subscribing clients when QPID broker restarts * Remove useless global vars / import * Avoid storing configuration in notifier * Implement a log handler using notifier * notifier: add audit level * Add 'warning' as an alias to 'warn' * Decouple from Oslo uuidutils module * Supply missing argument to raise_invalid_topology_version() * Support a new qpid topology * Remove hosts as property in TransportURL * Remove property on virtual_host in TransportURL * Updated from global requirements * Fix some typos and adjust capitalization * Changes driver method for notifications 1.3.0a1 ------- * Properly handle transport URL config on the client * Updated from global requirements * Updated from global requirements * Replace assertEquals with assertEqual 
* Properly handle transport:///vhost URL * Updated from global requirements * Make rpc_backend default to 'rabbit' * Apply six for metaclass * Add third element to RPC versions for backports * Fix rpc client docs * Updated from global requirements * Remove cruft from setup.cfg * Updated from global requirements * Fixes a typo in the address string syntax * Implement the server side of ZmqDriver * Add zmq-receiver * Implement the client side of ZmqDriver * Import zmq driver code with minimal modifications 1.2.0a11 -------- * Fix race-condition in rabbit reply processing * Fix error message if message handler fails * Don't include msg_id or reply_q in casts * Remove check_for_lock support in RPCClient 1.2.0a10 -------- * Add a Notifier.prepare( 1.2.0a9 ------- * Fix dictionary changed size during iteration 1.2.0a8 ------- * Fix transport URL parsing bug 1.2.0a7 ------- * Fix rabbit driver handling of None, etc. replies 1.2.0a6 ------- * Remove ConfFixture from toplevel public API * Fix fake driver handling of failure replies * Bumps hacking to 0.7.0 * Fix transport URL ipv6 parsing support 1.2.0a5 ------- * Fix handling of None, etc. replies 1.2.0a4 ------- 1.2.0a3 ------- * Add a unit testing configuration fixture * Add a TransportURL class to the public API 1.2.0a2 ------- * Ensure namespace package is installed 1.2.0a1 ------- * Add transport URL support to rabbit driver * Kill ability to specify exchange in transport URL * Fix capitalization, it's OpenStack * Fix handling expected exceptions in rabbit driver * Add thread-local store of request context * Add a context serialization hook * Removes a redundant version_is_compatible function * Document how call() handles remote exceptions * Add a per-transport allow_remote_exmods API * Expose RemoteError exception in the public API * Implement failure replies in the fake driver * Add API for expected endpoint exceptions * Add a driver method specifically for sending notifications * Enforce target preconditions outside of drivers * Add comments to ReplyWaiter.wait() * Remove some FIXMEs and debug logging * Remove unused IncomingMessage.done() * Implement wait_for_reply timeout in rabbit driver * Use testtools.TestCase assertion methods * Implement failure replies in rabbit driver * Add test with multiple waiting sender threads * Fix race condition in ReplyWaiters.wake_all() * Add rabbit unit test for sending and receiving replies * Add some docs on target version numbers * Add tests for rabbit driver wire protcol * Pop _unique_id when checking for duplicates * Add a transport cleanup() method * Remove my notes and test scripts * Add initial qpid driver * Move most new rabbit driver code into amqpdriver * Move rpc_conn_pool_size into amqp * Add simple rabbit driver unit test * Temporarily add eventlet to requirements * Add missing gettextutils * Add unit tests for object pool * Remove only_free param to Pool.get() * Connection pool bugfix * Remove unused file * Add exception serialization tests * Don't call consume() each time iterconsume() is called * Add test code for the rabbit driver * Remove use of gettextutils * Add initial rabbit driver * Remove use of openstack.common.local * Use stdlib logging * Don't register options with cfg.CONF at module import * Port away from some eventlet infrastructure * Adjust imports in rabbit/qpid drivers * Import some needed modules from oslo-incubator * Add oslo-incubator code unmodified * Make executor threads more robust * Allow use of hacking 0.6.0 and fix min version * Include docstrings in published 
docs * Use oslo.sphinx and remove local copy of doc theme * Add some notes * Unit tests for notifier * Make test notifier useful * Use lowercase priority in log notifier * Use lowercase priority in notifications topic * Handle case where no drivers configured * Fix buglet in v2 messaging notifier * Make LOG private in notifier * Require a transport to construct a Notifier * Add serializer support to notifier * Rename context to ctxt in serializer API * Rename context to ctxt in notify API * Make Notifier public at top-level * Docstrings for notifier API * Fix notify drivers namespace * Remove backwards compat entry point aliases * Simplify public symbol exports * Use assertEqual() rather than assertEquals() * Remove accidental use of messaging.rpc_server * Make exchange_from_url() use parse_url() * Unit tests for URL parsing code * Fix parse_urls() buglets * Move url utils into messaging._urls * Don't use common logging * Update example scripts for recent API changes * Fix fake driver with eventlet * Use log.warning() instead of log.warn() * Fix some pep8 issues * Don't translate exception messages * Knock off a few TODOs * Add can_send_version() to RPCClient * Check start() does nothing on a running server * Remove unused statements in base serializer * Fix thinko in exchange_from_url() * Call wait() in server tests * Add docstrings for base executor class * Remove a fixed fixme * Add a client call timeout test * Don't raise a driver-specific error on send * Add some docstrings to driver base * Test a bunch more RPC server scenarios * Make it possible to call prepare() on call context * Rework how queues get created in fake driver * Use testscenarios * Move files to new locations for oslo.messaging * Import stuff from oslo-incubator * Add oslo.messaging project infrastructure * Add some RPC server tests * More gracefully handle "no listeners" in fake driver * Better error handling in server.start() * Re-work server API to eliminate server subclasses * Add license header to _executors/__init__.py * Add RPCDispatcher tests * Check for return value in client serializer test * Add note about can_send_version() * More client unit tests * Make RPCClient.check_for_lock a callable * Apply version cap check when casting * Make RPCVersionCapError extend base exception * Remove a bogus param from client.prepare() docs * pep8 fixes for serializer code * Simple RPCClient test * Unit tests * Move some stuff into doc/ * Implement Target.__eq__() * Fix bug in exchange_from_url() * pep8 fixes for fake driver * Make utils.parse_url() docstring pep8 compliant * Don't translate exceptions * Misc pep8 fixes * pep8 fixes for toplevel package * Some error handling improvements * Recommend wrapping the client class rather than subclassing * Document how to use RPCClient directly * Document the public RPC API * Fix defaults for client.prepare() args * Fix client.cast() typo * Fix version_cap typo * Allow all target attributes in client.prepare() * Expose Serializer from top-level namespace * Allow specifying a serializer when creating a server * Make endpoint.target optional * Dispatch methods in their own greenthreads * Make rpc.dispatcher private * Make the base RPCServer class private * Fix typo with the serializer work * Update use of stevedore * Require topics and target in notify driver constructors * Add generic serialization support * Support namespace in RPCClient.prepare() * Add parse_url to _utils * Remove entry point lists from the public API * Support capping message versions in the client * Fix 
RPCClient check_for_lock() * First cut at the notifier API * Add some notes * Add IncomingMessage abstraction * Pass a context dict * Fix docstring * Implement a fake driver * Adding reply infrastructure * Add some exceptions * Fix buglet with default timeout * Fix target/namespace target buglet * Fix rpc client buglets * Fix 'Blockinging' typos * Missing self parameter to server start() * Fix default_exchange typo * Add forgotten piece of eventlet executor * It's _executors not _executor * Make poll() just return the message * Make drivers list public again * Add top-level convenience aliases * Prefix the executors module with underscore * Prefix the messaging.server module with an underscore * Prefix the drivers module with an underscore * Make transport methods private * Fix little typo in server exception class name * Add missing utils module * Add convenience RPC server classes * Update changes.txt for recent API changes * Use : for loading classes in entry_points * Split the dispatcher from the executor and server * Make driver and transport methods public * Pass the driver instance to the listener instead of config * Try out replacing "executor" for "dispatcher" * Fix host vs server typo * Initial framework oslo.messaging-1.3.0/requirements.txt0000664000175300017540000000051212316527457021076 0ustar jenkinsjenkins00000000000000oslo.config>=1.2.0 stevedore>=0.14 # for timeutils iso8601>=0.1.9 # for jsonutils six>=1.5.2 # FIXME(markmc): remove this when the drivers no longer # import eventlet eventlet>=0.13.0 # used by openstack/common/gettextutils.py Babel>=1.3 # for the routing notifier PyYAML>=3.1.0 # rabbit driver is the default kombu>=2.4.8 oslo.messaging-1.3.0/doc/0000775000175300017540000000000012316527535016356 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/doc/source/0000775000175300017540000000000012316527535017656 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/doc/source/notifier.rst0000664000175300017540000000031612316527457022232 0ustar jenkinsjenkins00000000000000-------- Notifier -------- .. currentmodule:: oslo.messaging .. autoclass:: Notifier :members: .. autoclass:: LoggingNotificationHandler :members: .. autoclass:: PublishErrorsHandler :members: oslo.messaging-1.3.0/doc/source/target.rst0000664000175300017540000000427212316527457021706 0ustar jenkinsjenkins00000000000000------ Target ------ .. currentmodule:: oslo.messaging .. autoclass:: Target =============== Target Versions =============== Target version numbers take the form Major.Minor. For a given message with version X.Y, the server must be marked as able to handle messages of version A.B, where A == X and B >= Y. The Major version number should be incremented for an almost completely new API. The Minor version number should be incremented for backwards compatible changes to an existing API. A backwards compatible change could be something like adding a new method, adding an argument to an existing method (but not requiring it), or changing the type for an existing argument (but still handling the old type as well). If no version is specified it defaults to '1.0'. In the case of RPC, if you wish to allow your server interfaces to evolve such that clients do not need to be updated in lockstep with the server, you should take care to implement the server changes in a backwards compatible manner and have the clients specify which interface version they require for each method. 
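As a sketch of the server side of such an interface (the endpoint class and method body below are hypothetical illustrations; the target attribute on an endpoint is the documented way to declare the version it implements), the endpoint advertises the highest interface version it supports::

    from oslo import messaging

    class HostOpsEndpoint(object):
        # Version 1.1 of this interface added get_host_uptime().
        target = messaging.Target(version='1.1')

        def get_host_uptime(self, ctxt, host):
            # Placeholder body; a real endpoint would query the host.
            return 0.0
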
Adding a new method to an endpoint is a backwards compatible change and the version attribute of the endpoint's target should be bumped from X.Y to X.Y+1. On the client side, the new RPC invocation should have a specific version specified to indicate the minimum API version that must be implemented for the method to be supported. For example:: def get_host_uptime(self, ctxt, host): cctxt = self.client.prepare(server=host, version='1.1') return cctxt.call(ctxt, 'get_host_uptime') In this case, version '1.1' is the first version that supported the get_host_uptime() method. Adding a new parameter to an RPC method can be made backwards compatible. The endpoint version on the server side should be bumped. The implementation of the method must not expect the parameter to be present.:: def some_remote_method(self, arg1, arg2, newarg=None): # The code needs to deal with newarg=None for cases # where an older client sends a message without it. pass On the client side, the same changes should be made as in example 1. The minimum version that supports the new parameter should be specified. oslo.messaging-1.3.0/doc/source/server.rst0000664000175300017540000000051012316527457021715 0ustar jenkinsjenkins00000000000000------ Server ------ .. automodule:: oslo.messaging.rpc.server .. currentmodule:: oslo.messaging .. autofunction:: get_rpc_server .. autoclass:: RPCDispatcher .. autoclass:: MessageHandlingServer :members: .. autofunction:: expected_exceptions .. autoexception:: ExpectedException .. autofunction:: get_local_context oslo.messaging-1.3.0/doc/source/notification_listener.rst0000664000175300017540000000043212316527457025005 0ustar jenkinsjenkins00000000000000--------------------- Notification Listener --------------------- .. automodule:: oslo.messaging.notify.listener .. currentmodule:: oslo.messaging .. autofunction:: get_notification_listener .. autoclass:: MessageHandlingServer :members: .. autofunction:: get_local_context oslo.messaging-1.3.0/doc/source/serializer.rst0000664000175300017540000000021312316527457022560 0ustar jenkinsjenkins00000000000000---------- Serializer ---------- .. currentmodule:: oslo.messaging .. autoclass:: Serializer :members: .. autoclass:: NoOpSerializer oslo.messaging-1.3.0/doc/source/exceptions.rst0000664000175300017540000000100412316527457022567 0ustar jenkinsjenkins00000000000000---------- Exceptions ---------- .. currentmodule:: oslo.messaging .. autoexception:: ClientSendError .. autoexception:: DriverLoadFailure .. autoexception:: ExecutorLoadFailure .. autoexception:: InvalidTransportURL .. autoexception:: MessagingException .. autoexception:: MessagingTimeout .. autoexception:: MessagingServerError .. autoexception:: NoSuchMethod .. autoexception:: RPCDispatcherError .. autoexception:: RPCVersionCapError .. autoexception:: ServerListenError .. autoexception:: UnsupportedVersion oslo.messaging-1.3.0/doc/source/rpcclient.rst0000664000175300017540000000021312316527457022372 0ustar jenkinsjenkins00000000000000---------- RPC Client ---------- .. currentmodule:: oslo.messaging .. autoclass:: RPCClient :members: .. autoexception:: RemoteError oslo.messaging-1.3.0/doc/source/transport.rst0000664000175300017540000000035612316527457022453 0ustar jenkinsjenkins00000000000000--------- Transport --------- .. currentmodule:: oslo.messaging .. autofunction:: get_transport .. autoclass:: Transport .. autoclass:: TransportURL :members: .. autoclass:: TransportHost .. 
autofunction:: set_transport_defaults oslo.messaging-1.3.0/doc/source/conf.py0000664000175300017540000000410112316527457021154 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'oslosphinx'] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'oslo.messaging' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" html_last_updated_fmt = os.popen(git_cmd).read() # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, '%s Documentation' % project, 'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} oslo.messaging-1.3.0/doc/source/conffixture.rst0000664000175300017540000000023612316527457022750 0ustar jenkinsjenkins00000000000000---------------------- Testing Configurations ---------------------- .. currentmodule:: oslo.messaging.conffixture .. autoclass:: ConfFixture :members: oslo.messaging-1.3.0/doc/source/index.rst0000664000175300017540000001333112316527460021515 0ustar jenkinsjenkins00000000000000oslo.messaging ============== The Oslo messaging API supports RPC and notifications over a number of different messaging transports. Contents ======== .. toctree:: :maxdepth: 1 transport target server rpcclient notifier notification_listener serializer exceptions opts conffixture Release Notes ============= 1.3.0 ----- Changes since 1.3.0a9: * Expose PublishErrorsHandler in the public API * 1288425_: Add kombu driver library to requirements.txt * 1255239_: Add unit tests for the qpid driver * 1261631_: Add unit test for Qpid reconnect order * 1282706_: Fixed inconsistent eventlet test failures * 1297161_: Fixed pep8 failure due to pyflakes bug * 1286984_: Build log_handler documentation .. _1288425: https://bugs.launchpad.net/oslo.messaging/+bug/1288425 .. _1255239: https://bugs.launchpad.net/oslo.messaging/+bug/1255239 .. _1261631: https://bugs.launchpad.net/oslo.messaging/+bug/1261631 .. 
_1282706: https://bugs.launchpad.net/oslo.messaging/+bug/1282706 .. _1297161: https://bugs.launchpad.net/oslo.messaging/+bug/1297161 .. _1286984: https://bugs.launchpad.net/oslo.messaging/+bug/1286984 Thanks to Alex Holden, ChangBo Guo, Clint Byrum, Doug Hellmann, Ihar Hrachyshka, Lance Bragstad and Numan Siddique for their contributions to this release. 1.3.0a9 ------- Changes since 1.3.0a8: * 856764_: Handle RabbitMQ consumer cancel notifications * 856764_: Slow down RabbitMQ reconnection attempts * 1287542_: Fix issue with duplicate SSL config options registered .. _856764: https://bugs.launchpad.net/oslo.messaging/+bug/856764 .. _1287542: https://bugs.launchpad.net/oslo.messaging/+bug/1287542 Thanks to Chet Burgess, Doug Hellmann and Nicolas Simonds for their contributions to this release. 1.3.0a8 ------- Changes since 1.3.0a7: * notification_listener_: More notification listener API additions * log_handler_: Add notifier log handler * 1282038_: Fix Qpid driver regression * 1276163_: Fix ExpectedException handling * 1261631_: Fix RabbitMQ and Qpid reconnection behaviour * 1269890_: Support different RabbitMQ login methods * 1277168_: Switch from oslo.sphinx to oslosphinx * Convert to oslo.test library * Improve configuration help strings .. _notification_listener: https://blueprints.launchpad.net/oslo.messaging/+spec/notification-subscriber-server .. _log_handler: https://blueprints.launchpad.net/oslo.messaging/+spec/log-handler .. _1282038: https://bugs.launchpad.net/oslo.messaging/+bug/1282038 .. _1276163: https://bugs.launchpad.net/oslo.messaging/+bug/1276163 .. _1261631: https://bugs.launchpad.net/oslo.messaging/+bug/1261631 .. _1269890: https://bugs.launchpad.net/oslo.messaging/+bug/1269890 .. _1277168: https://bugs.launchpad.net/oslo.messaging/+bug/1277168 Thanks to Andreas Jaeger, Ben Nemec, Dirk Mueller, Doug Hellmann, Flavio Percoco, Ihar Hrachyshka, Jeremy Hanmer, Joe Harrison, Kurt Griffiths, Lance Bragstad, Mehdi Abaakouk and Xavier Queralt for their contributions to this release. 1.3.0a7 ------- Changes since 1.3.0a6: * notification_listener_: Add notification listener API * 1272271_: Fix regression in RabbitMQ reconnection support * 1273455_: Remove use of deprecated stevedore API .. _notification_listener: https://blueprints.launchpad.net/oslo.messaging/+spec/notification-subscriber-server .. _1272271: https://bugs.launchpad.net/oslo.messaging/+bug/1272271 .. _1273455: https://bugs.launchpad.net/oslo.messaging/+bug/1273455 Thanks to Ala Rezmerita, Doug Hellmann and Mehdi Abaakouk for their contributions to this release. 1.3.0a6 ------- Changes since 1.3.0a5: * 1241566_: Enable sample config file generator include oslo.messaging options .. _1241566: https://bugs.launchpad.net/oslo.messaging/+bug/1241566 Thanks to Andreas Jaeger for his contributions to this release. 1.3.0a5 ------- Changes since 1.3.0a3: * routing_notifier_: Add a routing notifier .. _routing_notifier: http://blueprints.launchpad.net/oslo.messaging/+spec/configurable-notification Thanks to Dirk Mueller and Sandy Walsh for their contributions to this release. 1.3.0a3 ------- Changes since 1.3.0a2: * aliases_: Add transport aliases API * 1257293_: Fix duplicate topic messages for Qpid topology=2 * 1251757_: Fix issue with qpid driver reconnects * Add Sample priority to notifier API * Remove eventlet related code in amqp driver * Significant progress on Python 3 support. * Sync some changes from RPC code in oslo-incubator. .. 
_aliases: https://blueprints.launchpad.net/oslo.messaging/+spec/transport-aliases .. _1257293: https://bugs.launchpad.net/oslo/+bug/1257293 .. _1251757: https://bugs.launchpad.net/oslo/+bug/1251757 Thanks to Chang Bo Guo, Eric Guo, Ihar Hrachyshka, Joe Gordon, Kenneth Giusti, Lance Bragstad, Mehdi Abaakouk, Nikhil Manchanda, Sandy Walsh, Stanislav Kudriashev, Victor Stinner and Zhongyue Luo for their contributions to this release! 1.3.0a2 ------- Changes since 1.3.0a1: * logging_and_notification_: Notifications can now be sent using a python logging handler. * Notifier.warning() was added as an alias of Notifier.warn(). * Notifier.audit() has been added. * 1178375_: Support a new qpid topology. * TransportURL.hosts is no longer a read-only property. * MessagingException now behaves more like normal exceptions. * Fix sending of notifications. * Many internal cleanups. .. _logging_and_notification: https://blueprints.launchpad.net/oslo.messaging/+spec/logging-and-notification .. _1178375: https://bugs.launchpad.net/oslo/+bug/1178375 Thanks to Chang Bo Guo, Christian Strack, Julien Danjou, Kenneth Giusti and Russell Bryant for their contributions to this release! 1.2.0a1 ------- * Initial release of oslo.messaging_. .. _oslo.messaging: https://wiki.openstack.org/wiki/Oslo/Messaging Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` oslo.messaging-1.3.0/etc/0000775000175300017540000000000012316527535016364 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/etc/routing_notifier.yaml.sample0000664000175300017540000000134212316527457024121 0ustar jenkinsjenkins00000000000000# Setting a priority AND an event means both have to be satisfied. # # However, defining different sets for the same driver allows you # to do OR operations. # # See how this logic is modeled below: # # if (priority in info, warn or error) or # (event == compute.scheduler.run_instance) # send to messaging driver ... 
# if priority == 'poll' and # event == 'bandwidth.*' # send to poll driver group_1: messaging: accepted_priorities: ['info', 'warn', 'error'] poll: accepted_priorities: ['poll'] accepted_events: ['bandwidth.*'] log: accepted_events: ['compute.instance.exists'] group_2: messaging: accepted_events: ['compute.scheduler.run_instance.*'] oslo.messaging-1.3.0/AUTHORS0000664000175300017540000000010712316527535016657 0ustar jenkinsjenkins00000000000000 Russell Bryant William Henry oslo.messaging-1.3.0/openstack-common.conf0000664000175300017540000000041512316527457021740 0ustar jenkinsjenkins00000000000000[DEFAULT] # The list of modules to copy from oslo-incubator.git module=excutils module=gettextutils module=importutils module=jsonutils module=network_utils module=py3kcompat module=timeutils # The base module to hold the copy of openstack.common base=oslo.messaging oslo.messaging-1.3.0/setup.cfg0000664000175300017540000000351212316527535017433 0ustar jenkinsjenkins00000000000000[metadata] name = oslo.messaging author = OpenStack author-email = openstack-dev@lists.openstack.org summary = Oslo Messaging API description-file = README.rst home-page = https://launchpad.net/oslo classifier = Development Status :: 4 - Beta Environment :: OpenStack Intended Audience :: Developers Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 [files] packages = oslo namespace_packages = oslo [entry_points] console_scripts = oslo-messaging-zmq-receiver = oslo.messaging._cmd.zmq_receiver:main oslo.messaging.drivers = rabbit = oslo.messaging._drivers.impl_rabbit:RabbitDriver qpid = oslo.messaging._drivers.impl_qpid:QpidDriver zmq = oslo.messaging._drivers.impl_zmq:ZmqDriver # To avoid confusion kombu = oslo.messaging._drivers.impl_rabbit:RabbitDriver # This is just for internal testing fake = oslo.messaging._drivers.impl_fake:FakeDriver oslo.messaging.executors = blocking = oslo.messaging._executors.impl_blocking:BlockingExecutor eventlet = oslo.messaging._executors.impl_eventlet:EventletExecutor oslo.messaging.notify.drivers = messagingv2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver messaging = oslo.messaging.notify._impl_messaging:MessagingDriver log = oslo.messaging.notify._impl_log:LogDriver test = oslo.messaging.notify._impl_test:TestDriver noop = oslo.messaging.notify._impl_noop:NoOpDriver routing = oslo.messaging.notify._impl_routing:RoutingDriver oslo.config.opts = oslo.messaging = oslo.messaging.opts:list_opts [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 oslo.messaging-1.3.0/oslo/0000775000175300017540000000000012316527535016565 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/0000775000175300017540000000000012316527535020542 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/_executors/0000775000175300017540000000000012316527535022722 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/_executors/base.py0000664000175300017540000000215712316527457024216 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class ExecutorBase(object): def __init__(self, conf, listener, dispatcher): self.conf = conf self.listener = listener self.dispatcher = dispatcher @abc.abstractmethod def start(self): "Start polling for incoming messages." @abc.abstractmethod def stop(self): "Stop polling for messages." @abc.abstractmethod def wait(self): "Wait until the executor has stopped polling." oslo.messaging-1.3.0/oslo/messaging/_executors/impl_eventlet.py0000664000175300017540000000617412316527457026156 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import eventlet from eventlet import greenpool import greenlet from oslo.config import cfg from oslo.messaging._executors import base from oslo.messaging.openstack.common import excutils _eventlet_opts = [ cfg.IntOpt('rpc_thread_pool_size', default=64, help='Size of RPC greenthread pool.'), ] def spawn_with(ctxt, pool): """This is the equivalent of a with statement but with the content of the BLOCK statement executed into a greenthread exception path grab from: http://www.python.org/dev/peps/pep-0343/ """ def complete(thread, exit): exc = True try: try: thread.wait() except Exception: exc = False if not exit(*sys.exc_info()): raise finally: if exc: exit(None, None, None) callback = ctxt.__enter__() thread = pool.spawn(callback) thread.link(complete, ctxt.__exit__) return thread class EventletExecutor(base.ExecutorBase): """A message executor which integrates with eventlet. This is an executor which polls for incoming messages from a greenthread and dispatches each message in its own greenthread. The stop() method kills the message polling greenthread and the wait() method waits for all message dispatch greenthreads to complete. 
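    A usage sketch (``transport``, ``target`` and ``endpoints`` stand in
    for real application objects; get_rpc_server() is the public API that
    loads this executor by name rather than it being instantiated
    directly)::

        from oslo import messaging

        server = messaging.get_rpc_server(transport, target, endpoints,
                                          executor='eventlet')
        server.start()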
""" def __init__(self, conf, listener, dispatcher): super(EventletExecutor, self).__init__(conf, listener, dispatcher) self.conf.register_opts(_eventlet_opts) self._thread = None self._greenpool = greenpool.GreenPool(self.conf.rpc_thread_pool_size) def start(self): if self._thread is not None: return @excutils.forever_retry_uncaught_exceptions def _executor_thread(): try: while True: incoming = self.listener.poll() spawn_with(ctxt=self.dispatcher(incoming), pool=self._greenpool) except greenlet.GreenletExit: return self._thread = eventlet.spawn(_executor_thread) def stop(self): if self._thread is None: return self._thread.kill() def wait(self): if self._thread is None: return self._greenpool.waitall() try: self._thread.wait() except greenlet.GreenletExit: pass self._thread = None oslo.messaging-1.3.0/oslo/messaging/_executors/__init__.py0000664000175300017540000000000012316527457025024 0ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/_executors/impl_blocking.py0000664000175300017540000000311412316527457026107 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.messaging._executors import base class BlockingExecutor(base.ExecutorBase): """A message executor which blocks the current thread. The blocking executor's start() method functions as a request processing loop - i.e. it blocks, processes messages and only returns when stop() is called from a dispatched method. Method calls are dispatched in the current thread, so only a single method call can be executing at once. This executor is likely to only be useful for simple demo programs. """ def __init__(self, conf, listener, dispatcher): super(BlockingExecutor, self).__init__(conf, listener, dispatcher) self._running = False def start(self): self._running = True while self._running: with self.dispatcher(self.listener.poll()) as callback: callback() def stop(self): self._running = False def wait(self): pass oslo.messaging-1.3.0/oslo/messaging/server.py0000664000175300017540000001225612316527457022433 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
__all__ = [ 'ExecutorLoadFailure', 'MessageHandlingServer', 'MessagingServerError', 'ServerListenError', ] from stevedore import driver from oslo.messaging._drivers import base as driver_base from oslo.messaging import exceptions class MessagingServerError(exceptions.MessagingException): """Base class for all MessageHandlingServer exceptions.""" class ExecutorLoadFailure(MessagingServerError): """Raised if an executor can't be loaded.""" def __init__(self, executor, ex): msg = 'Failed to load executor "%s": %s' % (executor, ex) super(ExecutorLoadFailure, self).__init__(msg) self.executor = executor self.ex = ex class ServerListenError(MessagingServerError): """Raised if we failed to listen on a target.""" def __init__(self, target, ex): msg = 'Failed to listen on target "%s": %s' % (target, ex) super(ServerListenError, self).__init__(msg) self.target = target self.ex = ex class MessageHandlingServer(object): """Server for handling messages. Connect a transport to a dispatcher that knows how process the message using an executor that knows how the app wants to create new tasks. """ def __init__(self, transport, dispatcher, executor='blocking'): """Construct a message handling server. The dispatcher parameter is a callable which is invoked with context and message dictionaries each time a message is received. The executor parameter controls how incoming messages will be received and dispatched. By default, the most simple executor is used - the blocking executor. :param transport: the messaging transport :type transport: Transport :param dispatcher: a callable which is invoked for each method :type dispatcher: callable :param executor: name of message executor - e.g. 'eventlet', 'blocking' :type executor: str """ self.conf = transport.conf self.transport = transport self.dispatcher = dispatcher self.executor = executor try: mgr = driver.DriverManager('oslo.messaging.executors', self.executor) except RuntimeError as ex: raise ExecutorLoadFailure(self.executor, ex) else: self._executor_cls = mgr.driver self._executor = None super(MessageHandlingServer, self).__init__() def start(self): """Start handling incoming messages. This method causes the server to begin polling the transport for incoming messages and passing them to the dispatcher. Message processing will continue until the stop() method is called. The executor controls how the server integrates with the applications I/O handling strategy - it may choose to poll for messages in a new process, thread or co-operatively scheduled coroutine or simply by registering a callback with an event loop. Similarly, the executor may choose to dispatch messages in a new thread, coroutine or simply the current thread. An RPCServer subclass is available for each I/O strategy supported by the library, so choose the subclass appropriate for your program. """ if self._executor is not None: return try: listener = self.dispatcher._listen(self.transport) except driver_base.TransportDriverError as ex: raise ServerListenError(self.target, ex) self._executor = self._executor_cls(self.conf, listener, self.dispatcher) self._executor.start() def stop(self): """Stop handling incoming messages. Once this method returns, no new incoming messages will be handled by the server. However, the server may still be in the process of handling some messages. """ if self._executor is not None: self._executor.stop() def wait(self): """Wait for message processing to complete. 
After calling stop(), there may still be some existing messages which have not been completely processed. The wait() method blocks until all message processing has completed. """ if self._executor is not None: self._executor.wait() self._executor = None oslo.messaging-1.3.0/oslo/messaging/serializer.py0000664000175300017540000000426512316527457023277 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['Serializer', 'NoOpSerializer'] """Provides the definition of a message serialization handler""" import abc import six @six.add_metaclass(abc.ABCMeta) class Serializer(object): """Generic (de-)serialization definition base class.""" @abc.abstractmethod def serialize_entity(self, ctxt, entity): """Serialize something to primitive form. :param ctxt: Request context, in deserialized form :param entity: Entity to be serialized :returns: Serialized form of entity """ @abc.abstractmethod def deserialize_entity(self, ctxt, entity): """Deserialize something from primitive form. :param ctxt: Request context, in deserialized form :param entity: Primitive to be deserialized :returns: Deserialized form of entity """ @abc.abstractmethod def serialize_context(self, ctxt): """Serialize a request context into a dictionary. :param ctxt: Request context :returns: Serialized form of context """ @abc.abstractmethod def deserialize_context(self, ctxt): """Deserialize a dictionary into a request context. :param ctxt: Request context dictionary :returns: Deserialized form of context """ class NoOpSerializer(Serializer): """A serializer that does nothing.""" def serialize_entity(self, ctxt, entity): return entity def deserialize_entity(self, ctxt, entity): return entity def serialize_context(self, ctxt): return ctxt def deserialize_context(self, ctxt): return ctxt oslo.messaging-1.3.0/oslo/messaging/conffixture.py0000664000175300017540000000614012316527457023454 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['ConfFixture'] import sys import fixtures def _import_opts(conf, module, opts): __import__(module) conf.register_opts(getattr(sys.modules[module], opts)) class ConfFixture(fixtures.Fixture): """Tweak configuration options for unit testing. oslo.messaging registers a number of configuration options, but rather than directly referencing those options, users of the API should use this interface for querying and overriding certain configuration options. 
An example usage:: self.messaging_conf = self.useFixture(messaging.ConfFixture(cfg.CONF)) self.messaging_conf.transport_driver = 'fake' :param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts """ def __init__(self, conf): self.conf = conf _import_opts(self.conf, 'oslo.messaging._drivers.impl_rabbit', 'rabbit_opts') _import_opts(self.conf, 'oslo.messaging._drivers.impl_qpid', 'qpid_opts') _import_opts(self.conf, 'oslo.messaging._drivers.amqp', 'amqp_opts') _import_opts(self.conf, 'oslo.messaging.rpc.client', '_client_opts') _import_opts(self.conf, 'oslo.messaging.transport', '_transport_opts') _import_opts(self.conf, 'oslo.messaging.notify.notifier', '_notifier_opts') def setUp(self): super(ConfFixture, self).setUp() self.addCleanup(self.conf.reset) @property def transport_driver(self): """The transport driver - e.g. 'rabbit', 'qpid' or 'fake'.""" return self.conf.rpc_backend @transport_driver.setter def transport_driver(self, value): self.conf.set_override('rpc_backend', value) @property def in_memory(self): """Use an in-memory transport; currently supported by rabbit driver.""" if (('rabbit' in self.transport_driver or 'kombu' in self.transport_driver)): return self.conf.fake_rabbit else: return False @in_memory.setter def in_memory(self, value): if (('rabbit' in self.transport_driver or 'kombu' in self.transport_driver)): self.conf.set_override('fake_rabbit', value) @property def response_timeout(self): """Default number of seconds to wait for a response from a call.""" return self.conf.rpc_response_timeout @response_timeout.setter def response_timeout(self, value): self.conf.set_override('rpc_response_timeout', value) oslo.messaging-1.3.0/oslo/messaging/openstack/0000775000175300017540000000000012316527535022531 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/openstack/common/0000775000175300017540000000000012316527535024021 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/openstack/common/py3kcompat/0000775000175300017540000000000012316527535026113 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/openstack/common/py3kcompat/__init__.py0000664000175300017540000000117112316527457030227 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # oslo.messaging-1.3.0/oslo/messaging/openstack/common/py3kcompat/urlutils.py0000664000175300017540000000344412316527457030360 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
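# NOTE(editor): illustrative sketch, not part of this module. Using the
# ConfFixture defined above from a testtools-based test case; the test class
# is hypothetical and assumes oslo.config's global CONF object.
import testtools

from oslo.config import cfg
from oslo.messaging import conffixture


class FixtureTest(testtools.TestCase):

    def setUp(self):
        super(FixtureTest, self).setUp()
        self.messaging_conf = self.useFixture(
            conffixture.ConfFixture(cfg.CONF))
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 5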
See the # License for the specific language governing permissions and limitations # under the License. # """ Python2/Python3 compatibility layer for OpenStack """ import six if six.PY3: # python3 import urllib.error import urllib.parse import urllib.request urlencode = urllib.parse.urlencode urljoin = urllib.parse.urljoin quote = urllib.parse.quote parse_qsl = urllib.parse.parse_qsl unquote = urllib.parse.unquote unquote_plus = urllib.parse.unquote_plus urlparse = urllib.parse.urlparse urlsplit = urllib.parse.urlsplit urlunsplit = urllib.parse.urlunsplit SplitResult = urllib.parse.SplitResult urlopen = urllib.request.urlopen URLError = urllib.error.URLError pathname2url = urllib.request.pathname2url else: # python2 import urllib import urllib2 import urlparse urlencode = urllib.urlencode quote = urllib.quote unquote = urllib.unquote unquote_plus = urllib.unquote_plus parse = urlparse parse_qsl = parse.parse_qsl urljoin = parse.urljoin urlparse = parse.urlparse urlsplit = parse.urlsplit urlunsplit = parse.urlunsplit SplitResult = parse.SplitResult urlopen = urllib2.urlopen URLError = urllib2.URLError pathname2url = urllib.pathname2url oslo.messaging-1.3.0/oslo/messaging/openstack/common/timeutils.py0000664000175300017540000001424212316527457026420 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Time related utilities and helper functions. 
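# NOTE(editor): illustrative sketch, not part of this module. Code written
# against the urlutils aliases above runs unmodified on Python 2 and 3:
from oslo.messaging.openstack.common.py3kcompat import urlutils

split = urlutils.urlsplit('rabbit://me:passwd@host1:5672/vhost')
assert split.scheme == 'rabbit'
assert urlutils.quote('virtual/host', '') == 'virtual%2Fhost'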
""" import calendar import datetime import time import iso8601 import six # ISO 8601 extended time format with microseconds _ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND def isotime(at=None, subsecond=False): """Stringify time in ISO 8601 format.""" if not at: at = utcnow() st = at.strftime(_ISO8601_TIME_FORMAT if not subsecond else _ISO8601_TIME_FORMAT_SUBSECOND) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' st += ('Z' if tz == 'UTC' else tz) return st def parse_isotime(timestr): """Parse time from ISO 8601 format.""" try: return iso8601.parse_date(timestr) except iso8601.ParseError as e: raise ValueError(six.text_type(e)) except TypeError as e: raise ValueError(six.text_type(e)) def strtime(at=None, fmt=PERFECT_TIME_FORMAT): """Returns formatted utcnow.""" if not at: at = utcnow() return at.strftime(fmt) def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): """Turn a formatted time back into a datetime.""" return datetime.datetime.strptime(timestr, fmt) def normalize_time(timestamp): """Normalize time in arbitrary timezone to UTC naive object.""" offset = timestamp.utcoffset() if offset is None: return timestamp return timestamp.replace(tzinfo=None) - offset def is_older_than(before, seconds): """Return True if before is older than seconds.""" if isinstance(before, six.string_types): before = parse_strtime(before).replace(tzinfo=None) else: before = before.replace(tzinfo=None) return utcnow() - before > datetime.timedelta(seconds=seconds) def is_newer_than(after, seconds): """Return True if after is newer than seconds.""" if isinstance(after, six.string_types): after = parse_strtime(after).replace(tzinfo=None) else: after = after.replace(tzinfo=None) return after - utcnow() > datetime.timedelta(seconds=seconds) def utcnow_ts(): """Timestamp version of our utcnow function.""" if utcnow.override_time is None: # NOTE(kgriffs): This is several times faster # than going through calendar.timegm(...) return int(time.time()) return calendar.timegm(utcnow().timetuple()) def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: try: return utcnow.override_time.pop(0) except AttributeError: return utcnow.override_time return datetime.datetime.utcnow() def iso8601_from_timestamp(timestamp): """Returns a iso8601 formated date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp)) utcnow.override_time = None def set_time_override(override_time=None): """Overrides utils.utcnow. Make it return a constant time or a list thereof, one at a time. :param override_time: datetime instance or list thereof. If not given, defaults to the current UTC time. """ utcnow.override_time = override_time or datetime.datetime.utcnow() def advance_time_delta(timedelta): """Advance overridden time using a datetime.timedelta.""" assert(not utcnow.override_time is None) try: for dt in utcnow.override_time: dt += timedelta except TypeError: utcnow.override_time += timedelta def advance_time_seconds(seconds): """Advance overridden time by seconds.""" advance_time_delta(datetime.timedelta(0, seconds)) def clear_time_override(): """Remove the overridden time.""" utcnow.override_time = None def marshall_now(now=None): """Make an rpc-safe datetime with microseconds. Note: tzinfo is stripped, but not required for relative times. 
""" if not now: now = utcnow() return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond) def unmarshall_time(tyme): """Unmarshall a datetime dict.""" return datetime.datetime(day=tyme['day'], month=tyme['month'], year=tyme['year'], hour=tyme['hour'], minute=tyme['minute'], second=tyme['second'], microsecond=tyme['microsecond']) def delta_seconds(before, after): """Return the difference between two timing objects. Compute the difference in seconds between two date, time, or datetime objects (as a float, to microsecond resolution). """ delta = after - before return total_seconds(delta) def total_seconds(delta): """Return the total seconds of datetime.timedelta object. Compute total seconds of datetime.timedelta, datetime.timedelta doesn't have method total_seconds in Python2.6, calculate it manually. """ try: return delta.total_seconds() except AttributeError: return ((delta.days * 24 * 3600) + delta.seconds + float(delta.microseconds) / (10 ** 6)) def is_soon(dt, window): """Determines if time is going to happen in the next window seconds. :params dt: the time :params window: minimum seconds to remain to consider the time not soon :return: True if expiration is within the given duration """ soon = (utcnow() + datetime.timedelta(seconds=window)) return normalize_time(dt) <= soon oslo.messaging-1.3.0/oslo/messaging/openstack/common/network_utils.py0000664000175300017540000000520512316527457027311 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Network-related utilities and helper functions. """ from oslo.messaging.openstack.common.py3kcompat import urlutils def parse_host_port(address, default_port=None): """Interpret a string as a host:port pair. An IPv6 address MUST be escaped if accompanied by a port, because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 means both [2001:db8:85a3::8a2e:370:7334] and [2001:db8:85a3::8a2e:370]:7334. >>> parse_host_port('server01:80') ('server01', 80) >>> parse_host_port('server01') ('server01', None) >>> parse_host_port('server01', default_port=1234) ('server01', 1234) >>> parse_host_port('[::1]:80') ('::1', 80) >>> parse_host_port('[::1]') ('::1', None) >>> parse_host_port('[::1]', default_port=1234) ('::1', 1234) >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) ('2001:db8:85a3::8a2e:370:7334', 1234) """ if address[0] == '[': # Escaped ipv6 _host, _port = address[1:].split(']') host = _host if ':' in _port: port = _port.split(':')[1] else: port = default_port else: if address.count(':') == 1: host, port = address.split(':') else: # 0 means ipv4, >1 means ipv6. # We prohibit unescaped ipv6 addresses with port. host = address port = default_port return (host, None if port is None else int(port)) def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL using urlparse.urlsplit(), splitting query and fragments. 
This function papers over Python issue9374 when needed. The parameters are the same as urlparse.urlsplit. """ scheme, netloc, path, query, fragment = urlutils.urlsplit( url, scheme, allow_fragments) if allow_fragments and '#' in path: path, fragment = path.split('#', 1) if '?' in path: path, query = path.split('?', 1) return urlutils.SplitResult(scheme, netloc, path, query, fragment) oslo.messaging-1.3.0/oslo/messaging/openstack/common/__init__.py0000664000175300017540000000000012316527457026123 0ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/openstack/common/excutils.py0000664000175300017540000000721112316527457026237 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception related utilities. """ import logging import sys import time import traceback import six from oslo.messaging.openstack.common.gettextutils import _ # noqa class save_and_reraise_exception(object): """Save current exception, run some code and then re-raise. In some cases the exception context can be cleared, resulting in None being attempted to be re-raised after an exception handler is run. This can happen when eventlet switches greenthreads or when running an exception handler, code raises and catches an exception. In both cases the exception context will be cleared. To work around this, we save the exception state, run handler code, and then re-raise the original exception. If another exception occurs, the saved exception is logged and the new exception is re-raised. In some cases the caller may not want to re-raise the exception, and for those circumstances this context provides a reraise flag that can be used to suppress the exception. For example:: except Exception: with save_and_reraise_exception() as ctxt: decide_if_need_reraise() if not should_be_reraised: ctxt.reraise = False """ def __init__(self): self.reraise = True def __enter__(self): self.type_, self.value, self.tb, = sys.exc_info() return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: logging.error(_('Original exception being dropped: %s'), traceback.format_exception(self.type_, self.value, self.tb)) return False if self.reraise: six.reraise(self.type_, self.value, self.tb) def forever_retry_uncaught_exceptions(infunc): def inner_func(*args, **kwargs): last_log_time = 0 last_exc_message = None exc_count = 0 while True: try: return infunc(*args, **kwargs) except Exception as exc: this_exc_message = six.u(str(exc)) if this_exc_message == last_exc_message: exc_count += 1 else: exc_count = 1 # Do not log any more frequently than once a minute unless # the exception message changes cur_time = int(time.time()) if (cur_time - last_log_time > 60 or this_exc_message != last_exc_message): logging.exception( _('Unexpected exception occurred %d time(s)... ' 'retrying.') % exc_count) last_log_time = cur_time last_exc_message = this_exc_message exc_count = 0 # This should be a very rare event. 
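# NOTE(editor): illustrative sketch, not part of this module. A complete
# use of the save_and_reraise_exception() context manager from above;
# process(), is_optional and cleanup() are hypothetical attributes of the
# caller's resource object.
from oslo.messaging.openstack.common import excutils


def do_work(resource):
    try:
        resource.process()
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            if resource.is_optional:
                ctxt.reraise = False  # suppress; otherwise re-raised intact
            resource.cleanup()        # runs before any re-raise happens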
In case it isn't, do # a sleep. time.sleep(1) return inner_func oslo.messaging-1.3.0/oslo/messaging/openstack/common/importutils.py0000664000175300017540000000421512316527457026773 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Import related utilities and helper functions. """ import sys import traceback def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ValueError, AttributeError): raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) def import_object(import_str, *args, **kwargs): """Import a class and return an instance of it.""" return import_class(import_str)(*args, **kwargs) def import_object_ns(name_space, import_str, *args, **kwargs): """Tries to import object from default namespace. Imports a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace. """ import_value = "%s.%s" % (name_space, import_str) try: return import_class(import_value)(*args, **kwargs) except ImportError: return import_class(import_str)(*args, **kwargs) def import_module(import_str): """Import a module.""" __import__(import_str) return sys.modules[import_str] def try_import(import_str, default=None): """Try to import a module and if it fails return default.""" try: return import_module(import_str) except ImportError: return default oslo.messaging-1.3.0/oslo/messaging/openstack/common/jsonutils.py0000664000175300017540000001511712316527457026435 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ''' JSON related utilities. This module provides a few things: 1) A handy function for getting an object down to something that can be JSON serialized. See to_primitive(). 2) Wrappers around loads() and dumps(). The dumps() wrapper will automatically use to_primitive() for you if needed. 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson is available. 
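# NOTE(editor): illustrative sketch, not part of this module. Typical uses
# of the import helpers defined above (Python 2 stdlib names shown, matching
# the py26/py27 targets of this release):
from oslo.messaging.openstack.common import importutils

queue_cls = importutils.import_class('Queue.Queue')
queue = importutils.import_object('Queue.Queue', maxsize=10)
netaddr = importutils.try_import('netaddr')  # None when not installed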
''' import datetime import functools import inspect import itertools import json try: import xmlrpclib except ImportError: # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3 # however the function and object call signatures # remained the same. This whole try/except block should # be removed and replaced with a call to six.moves once # six 1.4.2 is released. See http://bit.ly/1bqrVzu import xmlrpc.client as xmlrpclib import six from oslo.messaging.openstack.common import gettextutils from oslo.messaging.openstack.common import importutils from oslo.messaging.openstack.common import timeutils netaddr = importutils.try_import("netaddr") _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, inspect.isfunction, inspect.isgeneratorfunction, inspect.isgenerator, inspect.istraceback, inspect.isframe, inspect.iscode, inspect.isbuiltin, inspect.isroutine, inspect.isabstract] _simple_types = (six.string_types + six.integer_types + (type(None), bool, float)) def to_primitive(value, convert_instances=False, convert_datetime=True, level=0, max_depth=3): """Convert a complex object into primitives. Handy for JSON serialization. We can optionally handle instances, but since this is a recursive function, we could have cyclical data structures. To handle cyclical data structures we could track the actual objects visited in a set, but not all objects are hashable. Instead we just track the depth of the object inspections and don't go too deep. Therefore, convert_instances=True is lossy ... be aware. """ # handle obvious types first - order of basic types determined by running # full tests on nova project, resulting in the following counts: # 572754 # 460353 # 379632 # 274610 # 199918 # 114200 # 51817 # 26164 # 6491 # 283 # 19 if isinstance(value, _simple_types): return value if isinstance(value, datetime.datetime): if convert_datetime: return timeutils.strtime(value) else: return value # value of itertools.count doesn't get caught by nasty_type_tests # and results in infinite loop when list(value) is called. if type(value) == itertools.count: return six.text_type(value) # FIXME(vish): Workaround for LP bug 852095. Without this workaround, # tests that raise an exception in a mocked method that # has a @wrap_exception with a notifier will fail. If # we up the dependency to 0.5.4 (when it is released) we # can remove this workaround. if getattr(value, '__module__', None) == 'mox': return 'mock' if level > max_depth: return '?' # The try block may not be necessary after the class check above, # but just in case ... try: recursive = functools.partial(to_primitive, convert_instances=convert_instances, convert_datetime=convert_datetime, level=level, max_depth=max_depth) if isinstance(value, dict): return dict((k, recursive(v)) for k, v in six.iteritems(value)) elif isinstance(value, (list, tuple)): return [recursive(lv) for lv in value] # It's not clear why xmlrpclib created their own DateTime type, but # for our purposes, make it a datetime type which is explicitly # handled if isinstance(value, xmlrpclib.DateTime): value = datetime.datetime(*tuple(value.timetuple())[:6]) if convert_datetime and isinstance(value, datetime.datetime): return timeutils.strtime(value) elif isinstance(value, gettextutils.Message): return value.data elif hasattr(value, 'iteritems'): return recursive(dict(value.iteritems()), level=level + 1) elif hasattr(value, '__iter__'): return recursive(list(value)) elif convert_instances and hasattr(value, '__dict__'): # Likely an instance of something. 
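# NOTE(editor): for illustration (not part of the original comments), what
# the to_primitive() conversion above yields for a typical nested value:
#
#   import datetime
#   value = {'when': datetime.datetime(2014, 1, 1), 'ids': (1, 2)}
#   to_primitive(value)
#   -> {'when': '2014-01-01T00:00:00.000000', 'ids': [1, 2]}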
Watch for cycles. # Ignore class member vars. return recursive(value.__dict__, level=level + 1) elif netaddr and isinstance(value, netaddr.IPAddress): return six.text_type(value) else: if any(test(value) for test in _nasty_type_tests): return six.text_type(value) return value except TypeError: # Class objects are tricky since they may define something like # __iter__ defined but it isn't callable as list(). return six.text_type(value) def dumps(value, default=to_primitive, **kwargs): return json.dumps(value, default=default, **kwargs) def loads(s): return json.loads(s) def load(s): return json.load(s) try: import anyjson except ImportError: pass else: anyjson._modules.append((__name__, 'dumps', TypeError, 'loads', ValueError, 'load')) anyjson.force_implementation(__name__) oslo.messaging-1.3.0/oslo/messaging/openstack/common/gettextutils.py0000664000175300017540000003154512316527457027153 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ gettext for openstack-common modules. Usual usage in an openstack.common module: from oslo.messaging.openstack.common.gettextutils import _ # noqa """ import copy import gettext import logging import os import re try: import UserString as _userString except ImportError: import collections as _userString from babel import localedata import six _localedir = os.environ.get('oslo.messaging'.upper() + '_LOCALEDIR') _t = gettext.translation('oslo.messaging', localedir=_localedir, fallback=True) _AVAILABLE_LANGUAGES = {} USE_LAZY = False def enable_lazy(): """Convenience function for configuring _() to use lazy gettext Call this at the start of execution to enable the gettextutils._ function to use lazy gettext functionality. This is useful if your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. """ global USE_LAZY USE_LAZY = True def _(msg): if USE_LAZY: return Message(msg, 'oslo.messaging') else: if six.PY3: return _t.gettext(msg) return _t.ugettext(msg) def install(domain, lazy=False): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). :param domain: the translation domain :param lazy: indicates whether or not to install the lazy _() function. The lazy _() introduces a way to do deferred translation of messages by installing a _ that builds Message objects, instead of strings, which can then be lazily translated into any available locale. """ if lazy: # NOTE(mrodden): Lazy gettext functionality. # # The following introduces a deferred way to do translations on # messages in OpenStack. 
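# NOTE(editor): for illustration (not part of the original comments), with
# lazy mode enabled the module-level _() defined above returns Message
# objects whose translation is deferred:
#
#   gettextutils.enable_lazy()
#   msg = _('Failed to reach host %s') % 'host-1'
#   isinstance(msg, gettextutils.Message)  # True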
We override the standard _() function # and % (format string) operation to build Message objects that can # later be translated when we have more information. # # Also included below is an example LocaleHandler that translates # Messages to an associated locale, effectively allowing many logs, # each with their own locale. def _lazy_gettext(msg): """Create and return a Message object. Lazy gettext function for a given domain, it is a factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.) Message encapsulates a string so that we can translate it later when needed. """ return Message(msg, domain) from six import moves moves.builtins.__dict__['_'] = _lazy_gettext else: localedir = '%s_LOCALEDIR' % domain.upper() if six.PY3: gettext.install(domain, localedir=os.environ.get(localedir)) else: gettext.install(domain, localedir=os.environ.get(localedir), unicode=True) class Message(_userString.UserString, object): """Class used to encapsulate translatable messages.""" def __init__(self, msg, domain): # _msg is the gettext msgid and should never change self._msg = msg self._left_extra_msg = '' self._right_extra_msg = '' self._locale = None self.params = None self.domain = domain @property def data(self): # NOTE(mrodden): this should always resolve to a unicode string # that best represents the state of the message currently localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') if self.locale: lang = gettext.translation(self.domain, localedir=localedir, languages=[self.locale], fallback=True) else: # use system locale for translations lang = gettext.translation(self.domain, localedir=localedir, fallback=True) if six.PY3: ugettext = lang.gettext else: ugettext = lang.ugettext full_msg = (self._left_extra_msg + ugettext(self._msg) + self._right_extra_msg) if self.params is not None: full_msg = full_msg % self.params return six.text_type(full_msg) @property def locale(self): return self._locale @locale.setter def locale(self, value): self._locale = value if not self.params: return # This Message object may have been constructed with one or more # Message objects as substitution parameters, given as a single # Message, or a tuple or Map containing some, so when setting the # locale for this Message we need to set it for those Messages too. 
if isinstance(self.params, Message): self.params.locale = value return if isinstance(self.params, tuple): for param in self.params: if isinstance(param, Message): param.locale = value return if isinstance(self.params, dict): for param in self.params.values(): if isinstance(param, Message): param.locale = value def _save_dictionary_parameter(self, dict_param): full_msg = self.data # look for %(blah) fields in string; # ignore %% and deal with the # case where % is first character on the line keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg) # if we don't find any %(blah) blocks but have a %s if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg): # apparently the full dictionary is the parameter params = copy.deepcopy(dict_param) else: params = {} for key in keys: try: params[key] = copy.deepcopy(dict_param[key]) except TypeError: # cast uncopyable thing to unicode string params[key] = six.text_type(dict_param[key]) return params def _save_parameters(self, other): # we check for None later to see if # we actually have parameters to inject, # so encapsulate if our parameter is actually None if other is None: self.params = (other, ) elif isinstance(other, dict): self.params = self._save_dictionary_parameter(other) else: # fallback to casting to unicode, # this will handle the problematic python code-like # objects that cannot be deep-copied try: self.params = copy.deepcopy(other) except TypeError: self.params = six.text_type(other) return self # overrides to be more string-like def __unicode__(self): return self.data def __str__(self): if six.PY3: return self.__unicode__() return self.data.encode('utf-8') def __getstate__(self): to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', 'domain', 'params', '_locale'] new_dict = self.__dict__.fromkeys(to_copy) for attr in to_copy: new_dict[attr] = copy.deepcopy(self.__dict__[attr]) return new_dict def __setstate__(self, state): for (k, v) in state.items(): setattr(self, k, v) # operator overloads def __add__(self, other): copied = copy.deepcopy(self) copied._right_extra_msg += other.__str__() return copied def __radd__(self, other): copied = copy.deepcopy(self) copied._left_extra_msg += other.__str__() return copied def __mod__(self, other): # do a format string to catch and raise # any possible KeyErrors from missing parameters self.data % other copied = copy.deepcopy(self) return copied._save_parameters(other) def __mul__(self, other): return self.data * other def __rmul__(self, other): return other * self.data def __getitem__(self, key): return self.data[key] def __getslice__(self, start, end): return self.data.__getslice__(start, end) def __getattribute__(self, name): # NOTE(mrodden): handle lossy operations that we can't deal with yet # These override the UserString implementation, since UserString # uses our __class__ attribute to try and build a new message # after running the inner data string through the operation. # At that point, we have lost the gettext message id and can just # safely resolve to a string instead. ops = ['capitalize', 'center', 'decode', 'encode', 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] if name in ops: return getattr(self.data, name) else: return _userString.UserString.__getattribute__(self, name) def get_available_languages(domain): """Lists the available languages for the given translation domain. 
:param domain: the domain to get languages for """ if domain in _AVAILABLE_LANGUAGES: return copy.copy(_AVAILABLE_LANGUAGES[domain]) localedir = '%s_LOCALEDIR' % domain.upper() find = lambda x: gettext.find(domain, localedir=os.environ.get(localedir), languages=[x]) # NOTE(mrodden): en_US should always be available (and first in case # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was # renamed to locale_identifiers() in >=1.0, the requirements master list # requires >=0.9.6, uncapped, so defensively work with both. We can remove # this check when the master list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() for i in locale_identifiers: if find(i) is not None: language_list.append(i) _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list) def get_localized_message(message, user_locale): """Gets a localized version of the given message in the given locale. If the message is not a Message object the message is returned as-is. If the locale is None the message is translated to the default locale. :returns: the translated message in unicode, or the original message if it could not be translated """ translated = message if isinstance(message, Message): original_locale = message.locale message.locale = user_locale translated = six.text_type(message) message.locale = original_locale return translated class LocaleHandler(logging.Handler): """Handler that can have a locale associated to translate Messages. A quick example of how to utilize the Message class above. LocaleHandler takes a locale and a target logging.Handler object to forward LogRecord objects to after translating the internal Message. """ def __init__(self, locale, target): """Initialize a LocaleHandler :param locale: locale to use for translating messages :param target: logging.Handler object to forward LogRecord objects to after translation """ logging.Handler.__init__(self) self.locale = locale self.target = target def emit(self, record): if isinstance(record.msg, Message): # set the locale and resolve to a string record.msg.locale = self.locale self.target.emit(record) oslo.messaging-1.3.0/oslo/messaging/openstack/__init__.py0000664000175300017540000000000012316527457024633 0ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/transport.py0000664000175300017540000003473012316527457023162 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright (c) 2012 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
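# NOTE(editor): illustrative sketch, not part of this module. Wiring the
# LocaleHandler defined above in front of an ordinary stream handler so that
# lazy Messages are translated before emission; the locale is an example.
import logging

from oslo.messaging.openstack.common import gettextutils

target = logging.StreamHandler()
handler = gettextutils.LocaleHandler('es', target)
logging.getLogger('demo').addHandler(handler)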
__all__ = [ 'DriverLoadFailure', 'InvalidTransportURL', 'Transport', 'TransportHost', 'TransportURL', 'get_transport', 'set_transport_defaults', ] from oslo.config import cfg import six from stevedore import driver from oslo.messaging import exceptions from oslo.messaging.openstack.common.py3kcompat import urlutils _transport_opts = [ cfg.StrOpt('transport_url', default=None, help='A URL representing the messaging driver to use and its ' 'full configuration. If not set, we fall back to the ' 'rpc_backend option and driver specific configuration.'), cfg.StrOpt('rpc_backend', default='rabbit', help='The messaging driver to use, defaults to rabbit. Other ' 'drivers include qpid and zmq.'), cfg.StrOpt('control_exchange', default='openstack', help='The default exchange under which topics are scoped. May ' 'be overridden by an exchange name specified in the ' 'transport_url option.'), ] def set_transport_defaults(control_exchange): """Set defaults for messaging transport configuration options. :param control_exchange: the default exchange under which topics are scoped :type control_exchange: str """ cfg.set_defaults(_transport_opts, control_exchange=control_exchange) class Transport(object): """A messaging transport. This is a mostly opaque handle for an underlying messaging transport driver. It has a single 'conf' property which is the cfg.ConfigOpts instance used to construct the transport object. """ def __init__(self, driver): self.conf = driver.conf self._driver = driver def _require_driver_features(self, requeue=False): self._driver.require_features(requeue=requeue) def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None): if not target.topic: raise exceptions.InvalidTarget('A topic is required to send', target) return self._driver.send(target, ctxt, message, wait_for_reply=wait_for_reply, timeout=timeout) def _send_notification(self, target, ctxt, message, version): if not target.topic: raise exceptions.InvalidTarget('A topic is required to send', target) self._driver.send_notification(target, ctxt, message, version) def _listen(self, target): if not (target.topic and target.server): raise exceptions.InvalidTarget('A server\'s target must have ' 'topic and server names specified', target) return self._driver.listen(target) def _listen_for_notifications(self, targets_and_priorities): for target, priority in targets_and_priorities: if not target.topic: raise exceptions.InvalidTarget('A target must have ' 'topic specified', target) return self._driver.listen_for_notifications(targets_and_priorities) def cleanup(self): """Release all resources associated with this transport.""" self._driver.cleanup() class InvalidTransportURL(exceptions.MessagingException): """Raised if transport URL is invalid.""" def __init__(self, url, msg): super(InvalidTransportURL, self).__init__(msg) self.url = url class DriverLoadFailure(exceptions.MessagingException): """Raised if a transport driver can't be loaded.""" def __init__(self, driver, ex): msg = 'Failed to load transport driver "%s": %s' % (driver, ex) super(DriverLoadFailure, self).__init__(msg) self.driver = driver self.ex = ex def get_transport(conf, url=None, allowed_remote_exmods=[], aliases=None): """A factory method for Transport objects. This method will construct a Transport object from transport configuration gleaned from the user's configuration and, optionally, a transport URL. If a transport URL is supplied as a parameter, any transport configuration contained in it takes precedence. 
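# NOTE(editor): illustrative sketch, not part of this module. Obtaining a
# transport as described above; the URL is an example and, when given, takes
# precedence over the rpc_backend option.
from oslo.config import cfg
from oslo import messaging

transport = messaging.get_transport(
    cfg.CONF, url='rabbit://me:passwd@host:5672/virtual_host')
# ... hand the transport to an RPC client/server or a Notifier ...
transport.cleanup()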
If no transport URL is supplied, but there is a transport URL supplied in the user's configuration then that URL will take the place of the URL parameter. In both cases, any configuration not supplied in the transport URL may be taken from individual configuration parameters in the user's configuration. An example transport URL might be:: rabbit://me:passwd@host:5672/virtual_host and can either be passed as a string or a TransportURL object. :param conf: the user configuration :type conf: cfg.ConfigOpts :param url: a transport URL :type url: str or TransportURL :param allowed_remote_exmods: a list of modules which a client using this transport will deserialize remote exceptions from :type allowed_remote_exmods: list :param aliases: A map of transport alias to transport name :type aliases: dict """ conf.register_opts(_transport_opts) if not isinstance(url, TransportURL): url = url or conf.transport_url parsed = TransportURL.parse(conf, url, aliases) if not parsed.transport: raise InvalidTransportURL(url, 'No scheme specified in "%s"' % url) url = parsed kwargs = dict(default_exchange=conf.control_exchange, allowed_remote_exmods=allowed_remote_exmods) try: mgr = driver.DriverManager('oslo.messaging.drivers', url.transport, invoke_on_load=True, invoke_args=[conf, url], invoke_kwds=kwargs) except RuntimeError as ex: raise DriverLoadFailure(url.transport, ex) return Transport(mgr.driver) class TransportHost(object): """A host element of a parsed transport URL.""" def __init__(self, hostname=None, port=None, username=None, password=None): self.hostname = hostname self.port = port self.username = username self.password = password def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['hostname', 'port', 'username', 'password']: v = getattr(self, a) if v: attrs.append((a, repr(v))) values = ', '.join(['%s=%s' % i for i in attrs]) return '' class TransportURL(object): """A parsed transport URL. Transport URLs take the form:: transport://user:pass@host1:port[,hostN:portN]/virtual_host i.e. the scheme selects the transport driver, you may include multiple hosts in netloc and the path part is a "virtual host" partition path. :param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts :param transport: a transport name e.g. 'rabbit' or 'qpid' :type transport: str :param virtual_host: a virtual host path e.g. 
'/' :type virtual_host: str :param hosts: a list of TransportHost objects :type hosts: list :param aliases: A map of transport alias to transport name :type aliases: dict """ def __init__(self, conf, transport=None, virtual_host=None, hosts=None, aliases=None): self.conf = conf self.conf.register_opts(_transport_opts) self._transport = transport self.virtual_host = virtual_host if hosts is None: self.hosts = [] else: self.hosts = hosts if aliases is None: self.aliases = {} else: self.aliases = aliases @property def transport(self): if self._transport is None: transport = self.conf.rpc_backend else: transport = self._transport return self.aliases.get(transport, transport) @transport.setter def transport(self, value): self._transport = value def __eq__(self, other): return (self.transport == other.transport and self.virtual_host == other.virtual_host and self.hosts == other.hosts) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['transport', 'virtual_host', 'hosts']: v = getattr(self, a) if v: attrs.append((a, repr(v))) values = ', '.join(['%s=%s' % i for i in attrs]) return '' def __str__(self): netlocs = [] for host in self.hosts: username = host.username password = host.password hostname = host.hostname port = host.port # Starting place for the network location netloc = '' # Build the username and password portion of the transport URL if username is not None or password is not None: if username is not None: netloc += urlutils.quote(username, '') if password is not None: netloc += ':%s' % urlutils.quote(password, '') netloc += '@' # Build the network location portion of the transport URL if hostname: if ':' in hostname: netloc += '[%s]' % hostname else: netloc += hostname if port is not None: netloc += ':%d' % port netlocs.append(netloc) # Assemble the transport URL url = '%s://%s/' % (self.transport, ','.join(netlocs)) if self.virtual_host: url += urlutils.quote(self.virtual_host) return url @classmethod def parse(cls, conf, url, aliases=None): """Parse an url. Assuming a URL takes the form of: transport://user:pass@host1:port[,hostN:portN]/virtual_host then parse the URL and return a TransportURL object. Netloc is parsed following the sequence bellow: * It is first split by ',' in order to support multiple hosts * The last parsed username and password will be propagated to the rest of hosts specified: user:passwd@host1:port1,host2:port2 [ {"username": "user", "password": "passwd", "host": "host1:port1"}, {"username": "user", "password": "passwd", "host": "host2:port2"} ] * In order to avoid the above propagation, it is possible to alter the order in which the hosts are specified or specify a set of fake credentials using ",:@host2:port2" user:passwd@host1:port1,:@host2:port2 [ {"username": "user", "password": "passwd", "host": "host1:port1"}, {"username": "", "password": "", "host": "host2:port2"} ] :param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts :param url: The URL to parse :type url: str :param aliases: A map of transport alias to transport name :type aliases: dict :returns: A TransportURL """ if not url: return cls(conf, aliases=aliases) if not isinstance(url, six.string_types): raise InvalidTransportURL(url, 'Wrong URL type') url = urlutils.urlparse(url) # Make sure there's not a query string; that could identify # requirements we can't comply with (e.g., ssl), so reject it if # it's present if '?' 
in url.path or url.query: raise InvalidTransportURL(url.geturl(), "Cannot comply with query string in " "transport URL") virtual_host = None if url.path.startswith('/'): virtual_host = url.path[1:] hosts = [] username = password = '' for host in url.netloc.split(','): if not host: continue hostname = host username = password = port = None if '@' in host: username, hostname = host.split('@', 1) if ':' in username: username, password = username.split(':', 1) if not hostname: hostname = None elif hostname.startswith('['): # Find the closing ']' and extract the hostname host_end = hostname.find(']') if host_end < 0: # NOTE(Vek): Identical to what Python 2.7's # urlparse.urlparse() raises in this case raise ValueError("Invalid IPv6 URL") port_text = hostname[host_end:] hostname = hostname[1:host_end] # Now we need the port; this is compliant with how urlparse # parses the port data port = None if ':' in port_text: port = int(port_text.split(':', 1)[1]) elif ':' in hostname: hostname, port = hostname.split(':', 1) port = int(port) hosts.append(TransportHost(hostname=hostname, port=port, username=username, password=password)) return cls(conf, url.scheme, virtual_host, hosts, aliases) oslo.messaging-1.3.0/oslo/messaging/target.py0000664000175300017540000000725612316527457022417 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Target(object): """Identifies the destination of messages. A Target encapsulates all the information to identify where a message should be sent or what messages a server is listening for. Different subsets of the information encapsulated in a Target object is relevant to various aspects of the API: creating a server: topic and server is required; exchange is optional an endpoint's target: namespace and version is optional client sending a message: topic is required, all other attributes optional Its attributes are: :param exchange: A scope for topics. Leave unspecified to default to the control_exchange configuration option. :type exchange: str :param topic: A name which identifies the set of interfaces exposed by a server. Multiple servers may listen on a topic and messages will be dispatched to one of the servers in a round-robin fashion. :type topic: str :param namespace: Identifies a particular interface (i.e. set of methods) exposed by a server. The default interface has no namespace identifier and is referred to as the null namespace. :type namespace: str :param version: Interfaces have a major.minor version number associated with them. A minor number increment indicates a backwards compatible change and an incompatible change is indicated by a major number bump. Servers may implement multiple major versions and clients may require indicate that their message requires a particular minimum minor version. :type version: str :param server: Clients can request that a message be directed to a specific server, rather than just one of a pool of servers listening on the topic. 
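# NOTE(editor): illustrative sketch, not part of this module. Parsing a
# multi-host URL with the TransportURL.parse() classmethod defined above;
# the URL is an example.
from oslo.config import cfg
from oslo import messaging

url = messaging.TransportURL.parse(
    cfg.CONF, 'rabbit://me:passwd@host1:5672,host2:5672/vhost')
assert url.transport == 'rabbit'
assert url.virtual_host == 'vhost'
assert len(url.hosts) == 2 and url.hosts[0].username == 'me'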
:type server: str :param fanout: Clients may request that a message be directed to all servers listening on a topic by setting fanout to ``True``, rather than just one of them. :type fanout: bool """ def __init__(self, exchange=None, topic=None, namespace=None, version=None, server=None, fanout=None): self.exchange = exchange self.topic = topic self.namespace = namespace self.version = version self.server = server self.fanout = fanout def __call__(self, **kwargs): kwargs.setdefault('exchange', self.exchange) kwargs.setdefault('topic', self.topic) kwargs.setdefault('namespace', self.namespace) kwargs.setdefault('version', self.version) kwargs.setdefault('server', self.server) kwargs.setdefault('fanout', self.fanout) return Target(**kwargs) def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: v = getattr(self, a) if v: attrs.append((a, v)) values = ', '.join(['%s=%s' % i for i in attrs]) return '' oslo.messaging-1.3.0/oslo/messaging/opts.py0000664000175300017540000000474712316527457022120 0ustar jenkinsjenkins00000000000000 # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'list_opts' ] import copy import itertools from oslo.messaging._drivers import amqp from oslo.messaging._drivers import common as drivers_common from oslo.messaging._drivers import impl_qpid from oslo.messaging._drivers import impl_rabbit from oslo.messaging._drivers import impl_zmq from oslo.messaging._drivers import matchmaker from oslo.messaging._drivers import matchmaker_redis from oslo.messaging._drivers import matchmaker_ring from oslo.messaging._executors import impl_eventlet from oslo.messaging.notify import notifier from oslo.messaging.rpc import client from oslo.messaging import transport _global_opt_lists = [ amqp.amqp_opts, drivers_common._exception_opts, impl_qpid.qpid_opts, impl_rabbit.rabbit_opts, impl_zmq.zmq_opts, matchmaker.matchmaker_opts, matchmaker_redis.matchmaker_redis_opts, impl_eventlet._eventlet_opts, notifier._notifier_opts, client._client_opts, transport._transport_opts ] _opts = [ (None, list(itertools.chain(*_global_opt_lists))), ('matchmaker_ring', matchmaker_ring.matchmaker_opts), ] def list_opts(): """Return a list of oslo.config options available in the library. The returned list includes all oslo.config options which may be registered at runtime by the library. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. This function is also discoverable via the 'oslo.messaging' entry point under the 'oslo.config.opts' namespace. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by this library. 
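# NOTE(editor): illustrative sketch, not part of this module. Target
# instances (defined above) are callable; calling one returns a copy with
# the given attributes overridden:
from oslo import messaging

base = messaging.Target(topic='compute', version='2.0')
directed = base(server='host-7')   # same topic/version, adds a server
fanout = base(fanout=True)         # broadcast to all listeners instead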
:returns: a list of (group_name, opts) tuples """ return [(g, copy.deepcopy(o)) for g, o in _opts] oslo.messaging-1.3.0/oslo/messaging/_drivers/0000775000175300017540000000000012316527535022357 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/_drivers/impl_rabbit.py0000664000175300017540000007267312316527457025237 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import itertools import logging import socket import ssl import time import uuid import kombu import kombu.connection import kombu.entity import kombu.messaging from oslo.config import cfg import six from oslo.messaging._drivers import amqp as rpc_amqp from oslo.messaging._drivers import amqpdriver from oslo.messaging._drivers import common as rpc_common from oslo.messaging.openstack.common import network_utils # FIXME(markmc): remove this _ = lambda s: s rabbit_opts = [ cfg.StrOpt('kombu_ssl_version', default='', help='SSL version to use (valid only if SSL enabled). ' 'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may ' 'be available on some distributions.' ), cfg.StrOpt('kombu_ssl_keyfile', default='', help='SSL key file (valid only if SSL enabled).'), cfg.StrOpt('kombu_ssl_certfile', default='', help='SSL cert file (valid only if SSL enabled).'), cfg.StrOpt('kombu_ssl_ca_certs', default='', help=('SSL certification authority file ' '(valid only if SSL enabled).')), cfg.FloatOpt('kombu_reconnect_delay', default=1.0, help='How long to wait before reconnecting in response to an ' 'AMQP consumer cancel notification.'), cfg.StrOpt('rabbit_host', default='localhost', help='The RabbitMQ broker address where a single node is ' 'used.'), cfg.IntOpt('rabbit_port', default=5672, help='The RabbitMQ broker port where a single node is used.'), cfg.ListOpt('rabbit_hosts', default=['$rabbit_host:$rabbit_port'], help='RabbitMQ HA cluster host:port pairs.'), cfg.BoolOpt('rabbit_use_ssl', default=False, help='Connect over SSL for RabbitMQ.'), cfg.StrOpt('rabbit_userid', default='guest', help='The RabbitMQ userid.'), cfg.StrOpt('rabbit_password', default='guest', help='The RabbitMQ password.', secret=True), cfg.StrOpt('rabbit_login_method', default='AMQPLAIN', help='the RabbitMQ login method'), cfg.StrOpt('rabbit_virtual_host', default='/', help='The RabbitMQ virtual host.'), cfg.IntOpt('rabbit_retry_interval', default=1, help='How frequently to retry connecting with RabbitMQ.'), cfg.IntOpt('rabbit_retry_backoff', default=2, help='How long to backoff for between retries when connecting ' 'to RabbitMQ.'), cfg.IntOpt('rabbit_max_retries', default=0, help='Maximum number of RabbitMQ connection retries. ' 'Default is 0 (infinite retry count).'), cfg.BoolOpt('rabbit_ha_queues', default=False, help='Use HA queues in RabbitMQ (x-ha-policy: all). 
' 'If you change this option, you must wipe the ' 'RabbitMQ database.'), # FIXME(markmc): this was toplevel in openstack.common.rpc cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider.'), ] LOG = logging.getLogger(__name__) def _get_queue_arguments(conf): """Construct the arguments for declaring a queue. If the rabbit_ha_queues option is set, we declare a mirrored queue as described here: http://www.rabbitmq.com/ha.html Setting x-ha-policy to all means that the queue will be mirrored to all nodes in the cluster. """ return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {} class RabbitMessage(dict): def __init__(self, raw_message): super(RabbitMessage, self).__init__( rpc_common.deserialize_msg(raw_message.payload)) self._raw_message = raw_message def acknowledge(self): self._raw_message.ack() def requeue(self): self._raw_message.requeue() class ConsumerBase(object): """Consumer base class.""" def __init__(self, channel, callback, tag, **kwargs): """Declare a queue on an amqp channel. 'channel' is the amqp channel to use 'callback' is the callback to call when messages are received 'tag' is a unique ID for the consumer on the channel queue name, exchange name, and other kombu options are passed in here as a dictionary. """ self.callback = callback self.tag = str(tag) self.kwargs = kwargs self.queue = None self.reconnect(channel) def reconnect(self, channel): """Re-declare the queue after a rabbit reconnect.""" self.channel = channel self.kwargs['channel'] = channel self.queue = kombu.entity.Queue(**self.kwargs) self.queue.declare() def _callback_handler(self, message, callback): """Call callback with deserialized message. Messages that are processed and ack'ed. """ try: callback(RabbitMessage(message)) except Exception: LOG.exception(_("Failed to process message" " ... skipping it.")) message.ack() def consume(self, *args, **kwargs): """Actually declare the consumer on the amqp channel. This will start the flow of messages from the queue. Using the Connection.iterconsume() iterator will process the messages, calling the appropriate callback. If a callback is specified in kwargs, use that. Otherwise, use the callback passed during __init__() If kwargs['nowait'] is True, then this call will block until a message is read. """ options = {'consumer_tag': self.tag} options['nowait'] = kwargs.get('nowait', False) callback = kwargs.get('callback', self.callback) if not callback: raise ValueError("No callback defined") def _callback(raw_message): message = self.channel.message_to_python(raw_message) self._callback_handler(message, callback) self.queue.consume(*args, callback=_callback, **options) def cancel(self): """Cancel the consuming from the queue, if it has started.""" try: self.queue.cancel(self.tag) except KeyError as e: # NOTE(comstud): Kludge to get around a amqplib bug if str(e) != "u'%s'" % self.tag: raise self.queue = None class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'.""" def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): """Init a 'direct' queue. 
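# NOTE(editor): illustrative sketch, not part of this module. What the
# queue-argument helper above hands to kombu when rabbit_ha_queues is set;
# the queue name is an example.
import kombu.entity

queue_arguments = {'x-ha-policy': 'all'}  # i.e. _get_queue_arguments(conf)
queue = kombu.entity.Queue(name='demo_queue', durable=False,
                           auto_delete=True, exclusive=False,
                           queue_arguments=queue_arguments)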
'channel' is the amqp channel to use 'msg_id' is the msg_id to listen on 'callback' is the callback to call when messages are received 'tag' is a unique ID for the consumer on the channel Other kombu options may be passed """ # Default options options = {'durable': False, 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': True, 'exclusive': False} options.update(kwargs) exchange = kombu.entity.Exchange(name=msg_id, type='direct', durable=options['durable'], auto_delete=options['auto_delete']) super(DirectConsumer, self).__init__(channel, callback, tag, name=msg_id, exchange=exchange, routing_key=msg_id, **options) class TopicConsumer(ConsumerBase): """Consumer class for 'topic'.""" def __init__(self, conf, channel, topic, callback, tag, name=None, exchange_name=None, **kwargs): """Init a 'topic' queue. :param channel: the amqp channel to use :param topic: the topic to listen on :paramtype topic: str :param callback: the callback to call when messages are received :param tag: a unique ID for the consumer on the channel :param name: optional queue name, defaults to topic :paramtype name: str Other kombu options may be passed as keyword arguments """ # Default options options = {'durable': conf.amqp_durable_queues, 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) exchange = kombu.entity.Exchange(name=exchange_name, type='topic', durable=options['durable'], auto_delete=options['auto_delete']) super(TopicConsumer, self).__init__(channel, callback, tag, name=name or topic, exchange=exchange, routing_key=topic, **options) class FanoutConsumer(ConsumerBase): """Consumer class for 'fanout'.""" def __init__(self, conf, channel, topic, callback, tag, **kwargs): """Init a 'fanout' queue. 'channel' is the amqp channel to use 'topic' is the topic to listen on 'callback' is the callback to call when messages are received 'tag' is a unique ID for the consumer on the channel Other kombu options may be passed """ unique = uuid.uuid4().hex exchange_name = '%s_fanout' % topic queue_name = '%s_fanout_%s' % (topic, unique) # Default options options = {'durable': False, 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': True, 'exclusive': False} options.update(kwargs) exchange = kombu.entity.Exchange(name=exchange_name, type='fanout', durable=options['durable'], auto_delete=options['auto_delete']) super(FanoutConsumer, self).__init__(channel, callback, tag, name=queue_name, exchange=exchange, routing_key=topic, **options) class Publisher(object): """Base Publisher class.""" def __init__(self, channel, exchange_name, routing_key, **kwargs): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.exchange_name = exchange_name self.routing_key = routing_key self.kwargs = kwargs self.reconnect(channel) def reconnect(self, channel): """Re-establish the Producer after a rabbit reconnection.""" self.exchange = kombu.entity.Exchange(name=self.exchange_name, **self.kwargs) self.producer = kombu.messaging.Producer(exchange=self.exchange, channel=channel, routing_key=self.routing_key) def send(self, msg, timeout=None): """Send a message.""" if timeout: # # AMQP TTL is in milliseconds when set in the header. 
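# For illustration: a send with timeout=30 (seconds) is published # with headers={'ttl': 30000}, so the broker expires the message # after 30 seconds instead of delivering it late.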
# self.producer.publish(msg, headers={'ttl': (timeout * 1000)}) else: self.producer.publish(msg) class DirectPublisher(Publisher): """Publisher class for 'direct'.""" def __init__(self, conf, channel, msg_id, **kwargs): """Init a 'direct' publisher. Kombu options may be passed as keyword args to override defaults """ options = {'durable': False, 'auto_delete': True, 'exclusive': False} options.update(kwargs) super(DirectPublisher, self).__init__(channel, msg_id, msg_id, type='direct', **options) class TopicPublisher(Publisher): """Publisher class for 'topic'.""" def __init__(self, conf, channel, topic, **kwargs): """Init a 'topic' publisher. Kombu options may be passed as keyword args to override defaults """ options = {'durable': conf.amqp_durable_queues, 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = rpc_amqp.get_control_exchange(conf) super(TopicPublisher, self).__init__(channel, exchange_name, topic, type='topic', **options) class FanoutPublisher(Publisher): """Publisher class for 'fanout'.""" def __init__(self, conf, channel, topic, **kwargs): """Init a 'fanout' publisher. Kombu options may be passed as keyword args to override defaults """ options = {'durable': False, 'auto_delete': True, 'exclusive': False} options.update(kwargs) super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic, None, type='fanout', **options) class NotifyPublisher(TopicPublisher): """Publisher class for 'notify'.""" def __init__(self, conf, channel, topic, **kwargs): self.durable = kwargs.pop('durable', conf.amqp_durable_queues) self.queue_arguments = _get_queue_arguments(conf) super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) def reconnect(self, channel): super(NotifyPublisher, self).reconnect(channel) # NOTE(jerdfelt): Normally the consumer would create the queue, but # we do this to ensure that messages don't get dropped if the # consumer is started after we do queue = kombu.entity.Queue(channel=channel, exchange=self.exchange, durable=self.durable, name=self.routing_key, routing_key=self.routing_key, queue_arguments=self.queue_arguments) queue.declare() class Connection(object): """Connection object.""" pool = None def __init__(self, conf, server_params=None): self.consumers = [] self.conf = conf self.max_retries = self.conf.rabbit_max_retries # Try forever? 
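# A rabbit_max_retries value <= 0 is normalized to None just below; # reconnect() treats None as "retry forever".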
if self.max_retries <= 0: self.max_retries = None self.interval_start = self.conf.rabbit_retry_interval self.interval_stepping = self.conf.rabbit_retry_backoff # max retry-interval = 30 seconds self.interval_max = 30 self.memory_transport = False if server_params is None: server_params = {} # Keys to translate from server_params to kombu params server_params_to_kombu_params = {'username': 'userid'} ssl_params = self._fetch_ssl_params() params_list = [] for adr in self.conf.rabbit_hosts: hostname, port = network_utils.parse_host_port( adr, default_port=self.conf.rabbit_port) params = { 'hostname': hostname, 'port': port, 'userid': self.conf.rabbit_userid, 'password': self.conf.rabbit_password, 'login_method': self.conf.rabbit_login_method, 'virtual_host': self.conf.rabbit_virtual_host, } for sp_key, value in six.iteritems(server_params): p_key = server_params_to_kombu_params.get(sp_key, sp_key) params[p_key] = value if self.conf.fake_rabbit: params['transport'] = 'memory' if self.conf.rabbit_use_ssl: params['ssl'] = ssl_params params_list.append(params) self.params_list = itertools.cycle(params_list) self.memory_transport = self.conf.fake_rabbit self.connection = None self.do_consume = None self.reconnect() # FIXME(markmc): use oslo sslutils when it is available as a library _SSL_PROTOCOLS = { "tlsv1": ssl.PROTOCOL_TLSv1, "sslv23": ssl.PROTOCOL_SSLv23, "sslv3": ssl.PROTOCOL_SSLv3 } try: _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 except AttributeError: pass @classmethod def validate_ssl_version(cls, version): key = version.lower() try: return cls._SSL_PROTOCOLS[key] except KeyError: raise RuntimeError(_("Invalid SSL version: %s") % version) def _fetch_ssl_params(self): """Fetch the SSL parameters that should be used for the connection, if any. """ ssl_params = dict() # http://docs.python.org/library/ssl.html - ssl.wrap_socket if self.conf.kombu_ssl_version: ssl_params['ssl_version'] = self.validate_ssl_version( self.conf.kombu_ssl_version) if self.conf.kombu_ssl_keyfile: ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile if self.conf.kombu_ssl_certfile: ssl_params['certfile'] = self.conf.kombu_ssl_certfile if self.conf.kombu_ssl_ca_certs: ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs # We might want to allow variations in the # future with this? ssl_params['cert_reqs'] = ssl.CERT_REQUIRED # Return the extended behavior or just have the default behavior return ssl_params or True def _connect(self, params): """Connect to rabbit. Re-establish any queues that may have been declared before if we are reconnecting. Exceptions should be handled by the caller. """ if self.connection: LOG.info(_("Reconnecting to AMQP server on " "%(hostname)s:%(port)d") % params) try: # XXX(nic): when reconnecting to a RabbitMQ cluster # with mirrored queues in use, the attempt to release the # connection can hang "indefinitely" somewhere deep down # in Kombu. Blocking the thread for a bit prior to # release seems to kludge around the problem where it is # otherwise reproducible. if self.conf.kombu_reconnect_delay > 0: LOG.info(_("Delaying reconnect for %1.1f seconds...") % self.conf.kombu_reconnect_delay) time.sleep(self.conf.kombu_reconnect_delay) self.connection.release() except self.connection_errors: pass # Setting this in case the next statement fails, though # it shouldn't be doing any network operations, yet.
self.connection = None self.connection = kombu.connection.BrokerConnection(**params) self.connection_errors = self.connection.connection_errors self.channel_errors = self.connection.channel_errors if self.memory_transport: # Kludge to speed up tests. self.connection.transport.polling_interval = 0.0 self.do_consume = True self.consumer_num = itertools.count(1) self.connection.connect() self.channel = self.connection.channel() # work around 'memory' transport bug in 1.1.3 if self.memory_transport: self.channel._new_queue('ae.undeliver') for consumer in self.consumers: consumer.reconnect(self.channel) LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % params) def reconnect(self): """Handles reconnecting and re-establishing queues. Will retry up to self.max_retries number of times. self.max_retries = 0 means to retry forever. Sleep between tries, starting at self.interval_start seconds, backing off self.interval_stepping number of seconds each attempt. """ attempt = 0 while True: params = six.next(self.params_list) attempt += 1 try: self._connect(params) return except IOError as e: pass except self.connection_errors as e: pass except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport # connection_errors in the case of a timeout waiting for # a protocol response. (See paste link in LP888621) # So, we check all exceptions for 'timeout' in them # and try to reconnect in this case. if 'timeout' not in str(e): raise log_info = {} log_info['err_str'] = str(e) log_info['max_retries'] = self.max_retries log_info.update(params) if self.max_retries and attempt == self.max_retries: msg = _('Unable to connect to AMQP server on ' '%(hostname)s:%(port)d after %(max_retries)d ' 'tries: %(err_str)s') % log_info LOG.error(msg) raise rpc_common.RPCException(msg) if attempt == 1: sleep_time = self.interval_start or 1 elif attempt > 1: sleep_time += self.interval_stepping if self.interval_max: sleep_time = min(sleep_time, self.interval_max) log_info['sleep_time'] = sleep_time LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' 'unreachable: %(err_str)s. Trying again in ' '%(sleep_time)d seconds.') % log_info) time.sleep(sleep_time) def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) except self.connection_errors as e: if error_callback: error_callback(e) except self.channel_errors as e: if error_callback: error_callback(e) except (socket.timeout, IOError) as e: if error_callback: error_callback(e) except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport # connection_errors in the case of a timeout waiting for # a protocol response. (See paste link in LP888621) # So, we check all exceptions for 'timeout' in them # and try to reconnect in this case. 
if 'timeout' not in str(e): raise if error_callback: error_callback(e) self.reconnect() def get_channel(self): """Convenience call for bin/clear_rabbit_queues.""" return self.channel def close(self): """Close/release this connection.""" self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again.""" self.channel.close() self.channel = self.connection.channel() # work around 'memory' transport bug in 1.1.3 if self.memory_transport: self.channel._new_queue('ae.undeliver') self.consumers = [] def declare_consumer(self, consumer_cls, topic, callback): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.error(_("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): consumer = consumer_cls(self.conf, self.channel, topic, callback, six.next(self.consumer_num)) self.consumers.append(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): """Return an iterator that will consume from all queues/consumers.""" def _error_callback(exc): if isinstance(exc, socket.timeout): LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % str(exc)) self.do_consume = True def _consume(): if self.do_consume: queues_head = self.consumers[:-1] # not fanout. queues_tail = self.consumers[-1] # fanout for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) self.do_consume = False return self.connection.drain_events(timeout=timeout) for iteration in itertools.count(0): if limit and iteration >= limit: raise StopIteration yield self.ensure(_error_callback, _consume) def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): """Send to a publisher based on the publisher class.""" def _error_callback(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.exception(_("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publish(): publisher = cls(self.conf, self.channel, topic, **kwargs) publisher.send(msg, timeout) self.ensure(_error_callback, _publish) def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. 
In nova's use, this is generally a msg_id queue used for responses for call/multicall """ self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, ), topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer.""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message.""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message.""" self.publisher_send(TopicPublisher, topic, msg, timeout) def fanout_send(self, topic, msg): """Send a 'fanout' message.""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): """Send a notify message on a topic.""" self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) def consume(self, limit=None, timeout=None): """Consume from all queues/consumers.""" it = self.iterconsume(limit=limit, timeout=timeout) while True: try: six.next(it) except StopIteration: return class RabbitDriver(amqpdriver.AMQPDriverBase): def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): conf.register_opts(rabbit_opts) conf.register_opts(rpc_amqp.amqp_opts) connection_pool = rpc_amqp.get_connection_pool(conf, Connection) super(RabbitDriver, self).__init__(conf, url, connection_pool, default_exchange, allowed_remote_exmods) def require_features(self, requeue=True): pass oslo.messaging-1.3.0/oslo/messaging/_drivers/base.py0000664000175300017540000000544612316527457023657 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from oslo.messaging import exceptions class TransportDriverError(exceptions.MessagingException): """Base class for transport driver specific exceptions.""" @six.add_metaclass(abc.ABCMeta) class IncomingMessage(object): def __init__(self, listener, ctxt, message): self.conf = listener.conf self.listener = listener self.ctxt = ctxt self.message = message @abc.abstractmethod def reply(self, reply=None, failure=None, log_failure=True): "Send a reply or failure back to the client." def acknowledge(self): "Acknowledge the message." @abc.abstractmethod def requeue(self): "Requeue the message." @six.add_metaclass(abc.ABCMeta) class Listener(object): def __init__(self, driver): self.conf = driver.conf self.driver = driver @abc.abstractmethod def poll(self): "Block until a message is pending and return an IncomingMessage."
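# An illustrative sketch (not part of the driver API) of how a concrete # driver pairs these two abstractions; 'MyListener', 'MyIncomingMessage' # and '_wait_for_message' are hypothetical names used only as an example: # # class MyListener(Listener): # def poll(self): # ctxt, message = self._wait_for_message() # hypothetical helper # return MyIncomingMessage(self, ctxt, message) # # A server loop then blocks in poll() and replies through the message: # # incoming = listener.poll() # incoming.reply(reply={'ok': True}) # incoming.acknowledge()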
@six.add_metaclass(abc.ABCMeta) class BaseDriver(object): def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): self.conf = conf self._url = url self._default_exchange = default_exchange self._allowed_remote_exmods = allowed_remote_exmods def require_features(self, requeue=False): if requeue: raise NotImplementedError('Message requeueing not supported by ' 'this transport driver') @abc.abstractmethod def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, envelope=False): """Send a message to the given target.""" @abc.abstractmethod def send_notification(self, target, ctxt, message, version): """Send a notification message to the given target.""" @abc.abstractmethod def listen(self, target): """Construct a Listener for the given target.""" @abc.abstractmethod def listen_for_notifications(self, targets_and_priorities): """Construct a notification Listener for the given list of (target, priority) tuples. """ @abc.abstractmethod def cleanup(self): """Release all resources.""" oslo.messaging-1.3.0/oslo/messaging/_drivers/amqpdriver.py0000664000175300017540000003676112316527460025115 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['AMQPDriverBase'] import logging import threading import uuid from six import moves from oslo import messaging from oslo.messaging._drivers import amqp as rpc_amqp from oslo.messaging._drivers import base from oslo.messaging._drivers import common as rpc_common LOG = logging.getLogger(__name__) class AMQPIncomingMessage(base.IncomingMessage): def __init__(self, listener, ctxt, message, unique_id, msg_id, reply_q): super(AMQPIncomingMessage, self).__init__(listener, ctxt, dict(message)) self.unique_id = unique_id self.msg_id = msg_id self.reply_q = reply_q self.acknowledge_callback = message.acknowledge self.requeue_callback = message.requeue def _send_reply(self, conn, reply=None, failure=None, ending=False, log_failure=True): if failure: failure = rpc_common.serialize_remote_exception(failure, log_failure) msg = {'result': reply, 'failure': failure} if ending: msg['ending'] = True rpc_amqp._add_unique_id(msg) # If a reply_q exists, add the msg_id to the reply and pass the # reply_q to direct_send() to use it as the response queue. # Otherwise use the msg_id for backward compatibility.
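# For illustration, a final reply sent via a reply_q is serialized from # a dict shaped roughly like: # {'result': <return value>, 'failure': None, 'ending': True, # '_unique_id': '<hex>', '_msg_id': '<hex>'} # (see serialize_msg() for the envelope that wraps it on the wire).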
if self.reply_q: msg['_msg_id'] = self.msg_id conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg)) else: conn.direct_send(self.msg_id, rpc_common.serialize_msg(msg)) def reply(self, reply=None, failure=None, log_failure=True): with self.listener.driver._get_connection() as conn: self._send_reply(conn, reply, failure, log_failure=log_failure) self._send_reply(conn, ending=True) def acknowledge(self): self.listener.msg_id_cache.add(self.unique_id) self.acknowledge_callback() def requeue(self): # NOTE(sileht): If the connection is lost between receiving the # message and requeueing it, this requeue call will fail, but # because the message was neither acknowledged nor added to the # msg_id_cache it will be reconsumed; the only difference is that # the message stays at the beginning of the queue instead of moving # to the end. self.requeue_callback() class AMQPListener(base.Listener): def __init__(self, driver, conn): super(AMQPListener, self).__init__(driver) self.conn = conn self.msg_id_cache = rpc_amqp._MsgIdCache() self.incoming = [] def __call__(self, message): # FIXME(markmc): logging isn't driver specific rpc_common._safe_log(LOG.debug, 'received %s', dict(message)) unique_id = self.msg_id_cache.check_duplicate_message(message) ctxt = rpc_amqp.unpack_context(self.conf, message) self.incoming.append(AMQPIncomingMessage(self, ctxt.to_dict(), message, unique_id, ctxt.msg_id, ctxt.reply_q)) def poll(self): while True: if self.incoming: return self.incoming.pop(0) self.conn.consume(limit=1) class ReplyWaiters(object): WAKE_UP = object() def __init__(self): self._queues = {} self._wrn_threshold = 10 def get(self, msg_id, timeout): try: return self._queues[msg_id].get(block=True, timeout=timeout) except moves.queue.Empty: raise messaging.MessagingTimeout('Timed out waiting for a reply ' 'to message ID %s' % msg_id) def check(self, msg_id): try: return self._queues[msg_id].get(block=False) except moves.queue.Empty: return None def put(self, msg_id, message_data): queue = self._queues.get(msg_id) if not queue: LOG.warn('No calling threads waiting for msg_id: %(msg_id)s, ' 'message: %(data)s', {'msg_id': msg_id, 'data': message_data}) LOG.warn('_queues: %s' % str(self._queues)) else: queue.put(message_data) def wake_all(self, except_id): msg_ids = [i for i in self._queues.keys() if i != except_id] for msg_id in msg_ids: self.put(msg_id, self.WAKE_UP) def add(self, msg_id, queue): self._queues[msg_id] = queue if len(self._queues) > self._wrn_threshold: LOG.warn('Number of call queues is greater than warning ' 'threshold: %d. There could be a leak.'
% self._wrn_threshold) self._wrn_threshold *= 2 def remove(self, msg_id): del self._queues[msg_id] class ReplyWaiter(object): def __init__(self, conf, reply_q, conn, allowed_remote_exmods): self.conf = conf self.conn = conn self.reply_q = reply_q self.allowed_remote_exmods = allowed_remote_exmods self.conn_lock = threading.Lock() self.incoming = [] self.msg_id_cache = rpc_amqp._MsgIdCache() self.waiters = ReplyWaiters() conn.declare_direct_consumer(reply_q, self) def __call__(self, message): message.acknowledge() self.incoming.append(message) def listen(self, msg_id): queue = moves.queue.Queue() self.waiters.add(msg_id, queue) def unlisten(self, msg_id): self.waiters.remove(msg_id) def _process_reply(self, data): result = None ending = False self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] result = rpc_common.deserialize_remote_exception( failure, self.allowed_remote_exmods) elif data.get('ending', False): ending = True else: result = data['result'] return result, ending def _poll_connection(self, msg_id, timeout): while True: while self.incoming: message_data = self.incoming.pop(0) incoming_msg_id = message_data.pop('_msg_id', None) if incoming_msg_id == msg_id: return self._process_reply(message_data) self.waiters.put(incoming_msg_id, message_data) try: self.conn.consume(limit=1, timeout=timeout) except rpc_common.Timeout: raise messaging.MessagingTimeout('Timed out waiting for a ' 'reply to message ID %s' % msg_id) def _poll_queue(self, msg_id, timeout): message = self.waiters.get(msg_id, timeout) if message is self.waiters.WAKE_UP: return None, None, True # lock was released reply, ending = self._process_reply(message) return reply, ending, False def _check_queue(self, msg_id): while True: message = self.waiters.check(msg_id) if message is self.waiters.WAKE_UP: continue if message is None: return None, None, True # queue is empty reply, ending = self._process_reply(message) return reply, ending, False def wait(self, msg_id, timeout): # # NOTE(markmc): we're waiting for a reply to msg_id to come in on # the reply_q, but there may be other threads also waiting for replies # to other msg_ids # # Only one thread can be consuming from the queue using this connection # and we don't want to hold open a connection per thread, so instead we # have the first thread take responsibility for passing replies not # intended for itself to the appropriate thread.
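# In other words: the first thread to acquire conn_lock polls the # connection, routing replies for other msg_ids into their queues via # self.waiters; once it sees its own final reply it releases the lock # and wakes the remaining waiters so another thread takes over polling.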
# final_reply = None while True: if self.conn_lock.acquire(False): # Ok, we're the thread responsible for polling the connection try: # Check the queue to see if a previous lock-holding thread # queued up a reply already while True: reply, ending, empty = self._check_queue(msg_id) if empty: break if not ending: final_reply = reply else: return final_reply # Now actually poll the connection while True: reply, ending = self._poll_connection(msg_id, timeout) if not ending: final_reply = reply else: return final_reply finally: self.conn_lock.release() # We've got our reply, tell the other threads to wake up # so that one of them will take over the responsibility for # polling the connection self.waiters.wake_all(msg_id) else: # We're going to wait for the first thread to pass us our reply reply, ending, trylock = self._poll_queue(msg_id, timeout) if trylock: # The first thread got its reply, let's try and take over # the responsibility for polling continue if not ending: final_reply = reply else: return final_reply class AMQPDriverBase(base.BaseDriver): def __init__(self, conf, url, connection_pool, default_exchange=None, allowed_remote_exmods=[]): super(AMQPDriverBase, self).__init__(conf, url, default_exchange, allowed_remote_exmods) self._server_params = self._server_params_from_url(self._url) self._default_exchange = default_exchange # FIXME(markmc): temp hack if self._default_exchange: self.conf.set_override('control_exchange', self._default_exchange) self._connection_pool = connection_pool self._reply_q_lock = threading.Lock() self._reply_q = None self._reply_q_conn = None self._waiter = None def _server_params_from_url(self, url): sp = {} if url.virtual_host is not None: sp['virtual_host'] = url.virtual_host if url.hosts: # FIXME(markmc): support multiple hosts host = url.hosts[0] sp['hostname'] = host.hostname if host.port is not None: sp['port'] = host.port sp['username'] = host.username or '' sp['password'] = host.password or '' return sp def _get_connection(self, pooled=True): # FIXME(markmc): we don't yet have a connection pool for each # Transport instance, so we'll only use the pool with the # transport configuration from the config file server_params = self._server_params or None if server_params: pooled = False return rpc_amqp.ConnectionContext(self.conf, self._connection_pool, pooled=pooled, server_params=server_params) def _get_reply_q(self): with self._reply_q_lock: if self._reply_q is not None: return self._reply_q reply_q = 'reply_' + uuid.uuid4().hex conn = self._get_connection(pooled=False) self._waiter = ReplyWaiter(self.conf, reply_q, conn, self._allowed_remote_exmods) self._reply_q = reply_q self._reply_q_conn = conn return self._reply_q def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, envelope=True, notify=False): # FIXME(markmc): remove this temporary hack class Context(object): def __init__(self, d): self.d = d def to_dict(self): return self.d context = Context(ctxt) msg = message if wait_for_reply: msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug('MSG_ID is %s' % (msg_id)) msg.update({'_reply_q': self._get_reply_q()}) rpc_amqp._add_unique_id(msg) rpc_amqp.pack_context(msg, context) if envelope: msg = rpc_common.serialize_msg(msg) if wait_for_reply: self._waiter.listen(msg_id) try: with self._get_connection() as conn: if notify: conn.notify_send(target.topic, msg) elif target.fanout: conn.fanout_send(target.topic, msg) else: topic = target.topic if target.server: topic = '%s.%s' % (target.topic, target.server) 
conn.topic_send(topic, msg, timeout=timeout) if wait_for_reply: result = self._waiter.wait(msg_id, timeout) if isinstance(result, Exception): raise result return result finally: if wait_for_reply: self._waiter.unlisten(msg_id) def send(self, target, ctxt, message, wait_for_reply=None, timeout=None): return self._send(target, ctxt, message, wait_for_reply, timeout) def send_notification(self, target, ctxt, message, version): return self._send(target, ctxt, message, envelope=(version == 2.0), notify=True) def listen(self, target): conn = self._get_connection(pooled=False) listener = AMQPListener(self, conn) conn.declare_topic_consumer(target.topic, listener) conn.declare_topic_consumer('%s.%s' % (target.topic, target.server), listener) conn.declare_fanout_consumer(target.topic, listener) return listener def listen_for_notifications(self, targets_and_priorities): conn = self._get_connection(pooled=False) listener = AMQPListener(self, conn) for target, priority in targets_and_priorities: conn.declare_topic_consumer('%s.%s' % (target.topic, priority), callback=listener, exchange_name=target.exchange) return listener def cleanup(self): if self._connection_pool: self._connection_pool.empty() self._connection_pool = None oslo.messaging-1.3.0/oslo/messaging/_drivers/matchmaker.py0000664000175300017540000002223312316527457025052 0ustar jenkinsjenkins00000000000000# Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The MatchMaker classes should accept a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ import contextlib import logging import eventlet from oslo.config import cfg # FIXME(markmc): remove this _ = lambda s: s matchmaker_opts = [ cfg.IntOpt('matchmaker_heartbeat_freq', default=300, help='Heartbeat frequency.'), cfg.IntOpt('matchmaker_heartbeat_ttl', default=600, help='Heartbeat time-to-live.'), ] CONF = cfg.CONF CONF.register_opts(matchmaker_opts) LOG = logging.getLogger(__name__) contextmanager = contextlib.contextmanager class MatchMakerException(Exception): """Signifies that a match could not be found.""" message = _("Match not found by MatchMaker.") class Exchange(object): """Implements lookups. Subclass this to support hashtables, dns, etc. """ def __init__(self): pass def run(self, key): raise NotImplementedError() class Binding(object): """A binding on which to perform a lookup.""" def __init__(self): pass def test(self, key): raise NotImplementedError() class MatchMakerBase(object): """Match Maker Base Class. Build off HeartbeatMatchMakerBase if building a heartbeat-capable MatchMaker. """ def __init__(self): # Array of tuples. Index [2] toggles negation, [3] is last-if-true self.bindings = [] self.no_heartbeat_msg = _('Matchmaker does not implement ' 'registration or heartbeat.') def register(self, key, host): """Register a host on a backend. Heartbeats, if applicable, may keepalive registration. """ pass def ack_alive(self, key, host): """Acknowledge that a key.host is alive.
Used internally for updating heartbeats, but may also be used publicly to acknowledge a system is alive (i.e. rpc message successfully sent to host) """ pass def is_alive(self, topic, host): """Checks if a host is alive.""" pass def expire(self, topic, host): """Explicitly expire a host's registration.""" pass def send_heartbeats(self): """Send all heartbeats. Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ pass def unregister(self, key, host): """Unregister a topic.""" pass def start_heartbeat(self): """Spawn heartbeat greenthread.""" pass def stop_heartbeat(self): """Destroys the heartbeat greenthread.""" pass def add_binding(self, binding, rule, last=True): self.bindings.append((binding, rule, False, last)) #NOTE(ewindisch): kept the following method in case we implement the # underlying support. #def add_negate_binding(self, binding, rule, last=True): # self.bindings.append((binding, rule, True, last)) def queues(self, key): workers = [] # bit is for negate bindings - if we choose to implement it. # last stops processing rules if this matches. for (binding, exchange, bit, last) in self.bindings: if binding.test(key): workers.extend(exchange.run(key)) # Support last. if last: return workers return workers class HeartbeatMatchMakerBase(MatchMakerBase): """Base for a heart-beat capable MatchMaker. Provides common methods for registering, unregistering, and maintaining heartbeats. """ def __init__(self): self.hosts = set() self._heart = None self.host_topic = {} super(HeartbeatMatchMakerBase, self).__init__() def send_heartbeats(self): """Send all heartbeats. Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ for key, host in self.host_topic: self.ack_alive(key, host) def ack_alive(self, key, host): """Acknowledge that a host.topic is alive. Used internally for updating heartbeats, but may also be used publicly to acknowledge a system is alive (i.e. rpc message successfully sent to host) """ raise NotImplementedError("Must implement ack_alive") def backend_register(self, key, host): """Implements registration logic. Called by register(self,key,host) """ raise NotImplementedError("Must implement backend_register") def backend_unregister(self, key, key_host): """Implements de-registration logic. Called by unregister(self,key,host) """ raise NotImplementedError("Must implement backend_unregister") def register(self, key, host): """Register a host on a backend. Heartbeats, if applicable, may keepalive registration. """ self.hosts.add(host) self.host_topic[(key, host)] = host key_host = '.'.join((key, host)) self.backend_register(key, key_host) self.ack_alive(key, host) def unregister(self, key, host): """Unregister a topic.""" if (key, host) in self.host_topic: del self.host_topic[(key, host)] self.hosts.discard(host) self.backend_unregister(key, '.'.join((key, host))) LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"), {'key': key, 'host': host}) def start_heartbeat(self): """Implementation of MatchMakerBase.start_heartbeat. Launches greenthread looping send_heartbeats(), yielding for CONF.matchmaker_heartbeat_freq seconds between iterations. 
""" if not self.hosts: raise MatchMakerException( _("Register before starting heartbeat.")) def do_heartbeat(): while True: self.send_heartbeats() eventlet.sleep(CONF.matchmaker_heartbeat_freq) self._heart = eventlet.spawn(do_heartbeat) def stop_heartbeat(self): """Destroys the heartbeat greenthread.""" if self._heart: self._heart.kill() class DirectBinding(Binding): """Specifies a host in the key via a '.' character. Although dots are used in the key, the behavior here is that it maps directly to a host, thus direct. """ def test(self, key): return '.' in key class TopicBinding(Binding): """Where a 'bare' key without dots. AMQP generally considers topic exchanges to be those *with* dots, but we deviate here in terminology as the behavior here matches that of a topic exchange (whereas where there are dots, behavior matches that of a direct exchange. """ def test(self, key): return '.' not in key class FanoutBinding(Binding): """Match on fanout keys, where key starts with 'fanout.' string.""" def test(self, key): return key.startswith('fanout~') class StubExchange(Exchange): """Exchange that does nothing.""" def run(self, key): return [(key, None)] class LocalhostExchange(Exchange): """Exchange where all direct topics are local.""" def __init__(self, host='localhost'): self.host = host super(Exchange, self).__init__() def run(self, key): return [('.'.join((key.split('.')[0], self.host)), self.host)] class DirectExchange(Exchange): """Exchange where all topic keys are split, sending to second half. i.e. "compute.host" sends a message to "compute.host" running on "host" """ def __init__(self): super(Exchange, self).__init__() def run(self, key): e = key.split('.', 1)[1] return [(key, e)] class MatchMakerLocalhost(MatchMakerBase): """Match Maker where all bare topics resolve to localhost. Useful for testing. """ def __init__(self, host='localhost'): super(MatchMakerLocalhost, self).__init__() self.add_binding(FanoutBinding(), LocalhostExchange(host)) self.add_binding(DirectBinding(), DirectExchange()) self.add_binding(TopicBinding(), LocalhostExchange(host)) class MatchMakerStub(MatchMakerBase): """Match Maker where topics are untouched. Useful for testing, or for AMQP/brokered queues. Will not work where knowledge of hosts is known (i.e. zeromq) """ def __init__(self): super(MatchMakerStub, self).__init__() self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange()) self.add_binding(TopicBinding(), StubExchange()) oslo.messaging-1.3.0/oslo/messaging/_drivers/pool.py0000664000175300017540000000470312316527457023711 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import threading import six @six.add_metaclass(abc.ABCMeta) class Pool(object): """A thread-safe object pool. Modelled after the eventlet.pools.Pool interface, but designed to be safe when using native threads without the GIL. Resizing is not supported. 
""" def __init__(self, max_size=4): super(Pool, self).__init__() self._max_size = max_size self._current_size = 0 self._cond = threading.Condition() self._items = collections.deque() def put(self, item): """Return an item to the pool.""" with self._cond: self._items.appendleft(item) self._cond.notify() def get(self): """Return an item from the pool, when one is available. This may cause the calling thread to block. """ with self._cond: while True: try: return self._items.popleft() except IndexError: pass if self._current_size < self._max_size: self._current_size += 1 break # FIXME(markmc): timeout needed to allow keyboard interrupt # http://bugs.python.org/issue8844 self._cond.wait(timeout=1) # We've grabbed a slot and dropped the lock, now do the creation try: return self.create() except Exception: with self._cond: self._current_size -= 1 raise def iter_free(self): """Iterate over free items.""" with self._cond: while True: try: yield self._items.popleft() except IndexError: break @abc.abstractmethod def create(self): """Construct a new item.""" oslo.messaging-1.3.0/oslo/messaging/_drivers/impl_qpid.py0000664000175300017540000006063512316527457024724 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import itertools import logging import time from oslo.config import cfg import six from oslo.messaging._drivers import amqp as rpc_amqp from oslo.messaging._drivers import amqpdriver from oslo.messaging._drivers import common as rpc_common from oslo.messaging.openstack.common import importutils from oslo.messaging.openstack.common import jsonutils # FIXME(markmc): remove this _ = lambda s: s qpid_codec = importutils.try_import("qpid.codec010") qpid_messaging = importutils.try_import("qpid.messaging") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") LOG = logging.getLogger(__name__) qpid_opts = [ cfg.StrOpt('qpid_hostname', default='localhost', help='Qpid broker hostname.'), cfg.IntOpt('qpid_port', default=5672, help='Qpid broker port.'), cfg.ListOpt('qpid_hosts', default=['$qpid_hostname:$qpid_port'], help='Qpid HA cluster host:port pairs.'), cfg.StrOpt('qpid_username', default='', help='Username for Qpid connection.'), cfg.StrOpt('qpid_password', default='', help='Password for Qpid connection.', secret=True), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for ' 'auth.'), cfg.IntOpt('qpid_heartbeat', default=60, help='Seconds between connection keepalive heartbeats.'), cfg.StrOpt('qpid_protocol', default='tcp', help="Transport to use, either 'tcp' or 'ssl'."), cfg.BoolOpt('qpid_tcp_nodelay', default=True, help='Whether to disable the Nagle algorithm.'), # NOTE(russellb) If any additional versions are added (beyond 1 and 2), # this file could probably use some additional refactoring so that the # differences between each version are split into different classes. 
cfg.IntOpt('qpid_topology_version', default=1, help="The qpid topology version to use. Version 1 is what " "was originally used by impl_qpid. Version 2 includes " "some backwards-incompatible changes that allow broker " "federation to work. Users should update to version 2 " "when they are able to take everything down, as it " "requires a clean break."), ] JSON_CONTENT_TYPE = 'application/json; charset=utf8' def raise_invalid_topology_version(conf): msg = (_("Invalid value for qpid_topology_version: %d") % conf.qpid_topology_version) LOG.error(msg) raise Exception(msg) class QpidMessage(dict): def __init__(self, session, raw_message): super(QpidMessage, self).__init__( rpc_common.deserialize_msg(raw_message.content)) self._raw_message = raw_message self._session = session def acknowledge(self): self._session.acknowledge(self._raw_message) def requeue(self): pass class ConsumerBase(object): """Consumer base class.""" def __init__(self, conf, session, callback, node_name, node_opts, link_name, link_opts): """Declare a queue on an amqp session. 'session' is the amqp session to use 'callback' is the callback to call when messages are received 'node_name' is the first part of the Qpid address string, before ';' 'node_opts' will be applied to the "x-declare" section of "node" in the address string. 'link_name' goes into the "name" field of the "link" in the address string 'link_opts' will be applied to the "x-declare" section of "link" in the address string. """ self.callback = callback self.receiver = None self.session = None if conf.qpid_topology_version == 1: addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": True, "auto-delete": True, }, }, "link": { "durable": True, "x-declare": { "durable": False, "auto-delete": True, "exclusive": False, }, }, } addr_opts["node"]["x-declare"].update(node_opts) elif conf.qpid_topology_version == 2: addr_opts = { "link": { "x-declare": { "auto-delete": True, "exclusive": False, }, }, } else: raise_invalid_topology_version(conf) addr_opts["link"]["x-declare"].update(link_opts) if link_name: addr_opts["link"]["name"] = link_name self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.connect(session) def connect(self, session): """Declare the receiver on connect.""" self._declare_receiver(session) def reconnect(self, session): """Re-declare the receiver after a Qpid reconnect.""" self._declare_receiver(session) def _declare_receiver(self, session): self.session = session self.receiver = session.receiver(self.address) self.receiver.capacity = 1 def _unpack_json_msg(self, msg): """Load the JSON data in msg if msg.content_type indicates that it is necessary. Put the loaded data back into msg.content and update msg.content_type appropriately. A Qpid Message containing a dict will have a content_type of 'amqp/map', whereas one containing a string that needs to be converted back from JSON will have a content_type of JSON_CONTENT_TYPE. :param msg: a Qpid Message object :returns: None """ if msg.content_type == JSON_CONTENT_TYPE: msg.content = jsonutils.loads(msg.content) msg.content_type = 'amqp/map' def consume(self): """Fetch the message and pass it to the callback object.""" message = self.receiver.fetch() try: self._unpack_json_msg(message) self.callback(QpidMessage(self.session, message)) except Exception: LOG.exception(_("Failed to process message... 
skipping it.")) self.session.acknowledge(message) def get_receiver(self): return self.receiver def get_node_name(self): return self.address.split(';')[0] class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'.""" def __init__(self, conf, session, msg_id, callback): """Init a 'direct' queue. 'session' is the amqp session to use 'msg_id' is the msg_id to listen on 'callback' is the callback to call when messages are received """ link_opts = { "auto-delete": conf.amqp_auto_delete, "exclusive": True, "durable": conf.amqp_durable_queues, } if conf.qpid_topology_version == 1: node_name = "%s/%s" % (msg_id, msg_id) node_opts = {"type": "direct"} link_name = msg_id elif conf.qpid_topology_version == 2: node_name = "amq.direct/%s" % msg_id node_opts = {} link_name = None else: raise_invalid_topology_version(conf) super(DirectConsumer, self).__init__(conf, session, callback, node_name, node_opts, link_name, link_opts) class TopicConsumer(ConsumerBase): """Consumer class for 'topic'.""" def __init__(self, conf, session, topic, callback, name=None, exchange_name=None): """Init a 'topic' queue. :param session: the amqp session to use :param topic: is the topic to listen on :paramtype topic: str :param callback: the callback to call when messages are received :param name: optional queue name, defaults to topic """ exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) link_opts = { "auto-delete": conf.amqp_auto_delete, "durable": conf.amqp_durable_queues, } if conf.qpid_topology_version == 1: node_name = "%s/%s" % (exchange_name, topic) elif conf.qpid_topology_version == 2: node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) else: raise_invalid_topology_version(conf) super(TopicConsumer, self).__init__(conf, session, callback, node_name, {}, name or topic, link_opts) class FanoutConsumer(ConsumerBase): """Consumer class for 'fanout'.""" def __init__(self, conf, session, topic, callback): """Init a 'fanout' queue. 'session' is the amqp session to use 'topic' is the topic to listen on 'callback' is the callback to call when messages are received """ self.conf = conf link_opts = {"exclusive": True} if conf.qpid_topology_version == 1: node_name = "%s_fanout" % topic node_opts = {"durable": False, "type": "fanout"} elif conf.qpid_topology_version == 2: node_name = "amq.topic/fanout/%s" % topic node_opts = {} else: raise_invalid_topology_version(conf) super(FanoutConsumer, self).__init__(conf, session, callback, node_name, node_opts, None, link_opts) class Publisher(object): """Base Publisher class.""" def __init__(self, conf, session, node_name, node_opts=None): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.sender = None self.session = session if conf.qpid_topology_version == 1: addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": False, # auto-delete isn't implemented for exchanges in qpid, # but put in here anyway "auto-delete": True, }, }, } if node_opts: addr_opts["node"]["x-declare"].update(node_opts) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) elif conf.qpid_topology_version == 2: self.address = node_name else: raise_invalid_topology_version(conf) self.reconnect(session) def reconnect(self, session): """Re-establish the Sender after a reconnection.""" self.sender = session.sender(self.address) def _pack_json_msg(self, msg): """Qpid cannot serialize dicts containing strings longer than 65535 characters. 
This function dumps the message content to a JSON string, which Qpid is able to handle. :param msg: May be either a Qpid Message object or a bare dict. :returns: A Qpid Message with its content field JSON encoded. """ try: msg.content = jsonutils.dumps(msg.content) except AttributeError: # Need to have a Qpid message so we can set the content_type. msg = qpid_messaging.Message(jsonutils.dumps(msg)) msg.content_type = JSON_CONTENT_TYPE return msg def send(self, msg): """Send a message.""" try: # Check if Qpid can encode the message check_msg = msg if not hasattr(check_msg, 'content_type'): check_msg = qpid_messaging.Message(msg) content_type = check_msg.content_type enc, dec = qpid_messaging.message.get_codec(content_type) enc(check_msg.content) except qpid_codec.CodecException: # This means the message couldn't be serialized as a dict. msg = self._pack_json_msg(msg) self.sender.send(msg) class DirectPublisher(Publisher): """Publisher class for 'direct'.""" def __init__(self, conf, session, msg_id): """Init a 'direct' publisher.""" if conf.qpid_topology_version == 1: node_name = msg_id node_opts = {"type": "direct"} elif conf.qpid_topology_version == 2: node_name = "amq.direct/%s" % msg_id node_opts = {} else: raise_invalid_topology_version(conf) super(DirectPublisher, self).__init__(conf, session, node_name, node_opts) class TopicPublisher(Publisher): """Publisher class for 'topic'.""" def __init__(self, conf, session, topic): """Init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) if conf.qpid_topology_version == 1: node_name = "%s/%s" % (exchange_name, topic) elif conf.qpid_topology_version == 2: node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) else: raise_invalid_topology_version(conf) super(TopicPublisher, self).__init__(conf, session, node_name) class FanoutPublisher(Publisher): """Publisher class for 'fanout'.""" def __init__(self, conf, session, topic): """Init a 'fanout' publisher. """ if conf.qpid_topology_version == 1: node_name = "%s_fanout" % topic node_opts = {"type": "fanout"} elif conf.qpid_topology_version == 2: node_name = "amq.topic/fanout/%s" % topic node_opts = {} else: raise_invalid_topology_version(conf) super(FanoutPublisher, self).__init__(conf, session, node_name, node_opts) class NotifyPublisher(Publisher): """Publisher class for notifications.""" def __init__(self, conf, session, topic): """Init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) node_opts = {"durable": True} if conf.qpid_topology_version == 1: node_name = "%s/%s" % (exchange_name, topic) elif conf.qpid_topology_version == 2: node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) else: raise_invalid_topology_version(conf) super(NotifyPublisher, self).__init__(conf, session, node_name, node_opts) class Connection(object): """Connection object.""" pool = None def __init__(self, conf, server_params=None): if not qpid_messaging: raise ImportError("Failed to import qpid.messaging") self.connection = None self.session = None self.consumers = {} self.conf = conf if server_params and 'hostname' in server_params: # NOTE(russellb) This enables support for cast_to_server. 
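# For example, server_params={'hostname': 'broker1', 'port': 5671} # becomes qpid_hosts=['broker1:5671'] below, which then overrides # conf.qpid_hosts via params.update(server_params).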
server_params['qpid_hosts'] = [ '%s:%d' % (server_params['hostname'], server_params.get('port', 5672)) ] params = { 'qpid_hosts': self.conf.qpid_hosts, 'username': self.conf.qpid_username, 'password': self.conf.qpid_password, } params.update(server_params or {}) self.brokers = itertools.cycle(params['qpid_hosts']) self.username = params['username'] self.password = params['password'] self.reconnect() def connection_create(self, broker): # Create the connection - this does not open the connection self.connection = qpid_messaging.Connection(broker) # Check if flags are set and if so set them for the connection # before we call open self.connection.username = self.username self.connection.password = self.password self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms # Reconnection is done by self.reconnect() self.connection.reconnect = False self.connection.heartbeat = self.conf.qpid_heartbeat self.connection.transport = self.conf.qpid_protocol self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay def _register_consumer(self, consumer): self.consumers[str(consumer.get_receiver())] = consumer def _lookup_consumer(self, receiver): return self.consumers[str(receiver)] def reconnect(self): """Handle reconnecting and re-establishing sessions and queues.""" delay = 1 while True: # Close the old connection, if any, before reconnecting if self.connection is not None and self.connection.opened(): try: self.connection.close() except qpid_exceptions.ConnectionError: pass broker = six.next(self.brokers) try: self.connection_create(broker) self.connection.open() except qpid_exceptions.ConnectionError as e: msg_dict = dict(e=e, delay=delay) msg = _("Unable to connect to AMQP server: %(e)s. " "Sleeping %(delay)s seconds") % msg_dict LOG.error(msg) time.sleep(delay) delay = min(delay + 1, 5) else: LOG.info(_('Connected to AMQP server on %s'), broker) break self.session = self.connection.session() if self.consumers: consumers = self.consumers self.consumers = {} for consumer in six.itervalues(consumers): consumer.reconnect(self.session) self._register_consumer(consumer) LOG.debug(_("Re-established AMQP queues")) def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) except (qpid_exceptions.Empty, qpid_exceptions.ConnectionError) as e: if error_callback: error_callback(e) self.reconnect() def close(self): """Close/release this connection.""" try: self.connection.close() except Exception: # NOTE(dripton) Logging exceptions that happen during cleanup just # causes confusion; there's really nothing useful we can do with # them.
pass self.connection = None def reset(self): """Reset a connection so it can be used again.""" self.session.close() self.session = self.connection.session() self.consumers = {} def declare_consumer(self, consumer_cls, topic, callback): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.error(_("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): consumer = consumer_cls(self.conf, self.session, topic, callback) self._register_consumer(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): """Return an iterator that will consume from all queues/consumers.""" def _error_callback(exc): if isinstance(exc, qpid_exceptions.Empty): LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % str(exc)) def _consume(): nxt_receiver = self.session.next_receiver(timeout=timeout) try: self._lookup_consumer(nxt_receiver).consume() except Exception: LOG.exception(_("Error processing message. Skipping it.")) for iteration in itertools.count(0): if limit and iteration >= limit: raise StopIteration yield self.ensure(_error_callback, _consume) def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class.""" def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.exception(_("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publisher_send(): publisher = cls(self.conf, self.session, topic) publisher.send(msg) return self.ensure(_connect_error, _publisher_send) def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. In nova's use, this is generally a msg_id queue used for responses for call/multicall """ self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, ), topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer.""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message.""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message.""" # # We want to create a message with attributes, e.g. a TTL. We # don't really need to keep 'msg' in its JSON format any longer # so let's create an actual Qpid message here and get some # value-add on the go. # # WARNING: Request timeout happens to be in the same units as # Qpid's TTL (seconds). If this changes in the future, then this # will need to be altered accordingly. 
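# For example, timeout=60 here becomes Message(ttl=60), i.e. 60 # seconds, whereas the Rabbit driver expresses the same timeout as a # 'ttl' header of 60000 milliseconds.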
# qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) self.publisher_send(TopicPublisher, topic, qpid_message) def fanout_send(self, topic, msg): """Send a 'fanout' message.""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): """Send a notify message on a topic.""" self.publisher_send(NotifyPublisher, topic, msg) def consume(self, limit=None, timeout=None): """Consume from all queues/consumers.""" it = self.iterconsume(limit=limit, timeout=timeout) while True: try: six.next(it) except StopIteration: return class QpidDriver(amqpdriver.AMQPDriverBase): def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): conf.register_opts(qpid_opts) conf.register_opts(rpc_amqp.amqp_opts) connection_pool = rpc_amqp.get_connection_pool(conf, Connection) super(QpidDriver, self).__init__(conf, url, connection_pool, default_exchange, allowed_remote_exmods) oslo.messaging-1.3.0/oslo/messaging/_drivers/impl_fake.py0000664000175300017540000001531512316527457024670 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import threading import time from six import moves from oslo import messaging from oslo.messaging._drivers import base class FakeIncomingMessage(base.IncomingMessage): def __init__(self, listener, ctxt, message, reply_q, requeue): super(FakeIncomingMessage, self).__init__(listener, ctxt, message) self.requeue_callback = requeue self._reply_q = reply_q def reply(self, reply=None, failure=None, log_failure=True): if self._reply_q: failure = failure[1] if failure else None self._reply_q.put((reply, failure)) def requeue(self): self.requeue_callback() class FakeListener(base.Listener): def __init__(self, driver, exchange_manager, targets): super(FakeListener, self).__init__(driver) self._exchange_manager = exchange_manager self._targets = targets def poll(self): while True: for target in self._targets: exchange = self._exchange_manager.get_exchange(target.exchange) (ctxt, message, reply_q, requeue) = exchange.poll(target) if message is not None: message = FakeIncomingMessage(self, ctxt, message, reply_q, requeue) return message time.sleep(.05) class FakeExchange(object): def __init__(self, name): self.name = name self._queues_lock = threading.RLock() self._topic_queues = {} self._server_queues = {} def _get_topic_queue(self, topic): return self._topic_queues.setdefault(topic, []) def _get_server_queue(self, topic, server): return self._server_queues.setdefault((topic, server), []) def deliver_message(self, topic, ctxt, message, server=None, fanout=False, reply_q=None): with self._queues_lock: if fanout: queues = [q for t, q in self._server_queues.items() if t[0] == topic] elif server is not None: queues = [self._get_server_queue(topic, server)] else: queues = [self._get_topic_queue(topic)] def requeue(): self.deliver_message(topic, ctxt, message, server=server, fanout=fanout, reply_q=reply_q) for queue in queues: 
queue.append((ctxt, message, reply_q, requeue)) def poll(self, target): with self._queues_lock: if target.server: queue = self._get_server_queue(target.topic, target.server) else: queue = self._get_topic_queue(target.topic) return queue.pop(0) if queue else (None, None, None, None) class FakeExchangeManager(object): def __init__(self, default_exchange): self._default_exchange = default_exchange self._exchanges_lock = threading.Lock() self._exchanges = {} def get_exchange(self, name): if name is None: name = self._default_exchange with self._exchanges_lock: return self._exchanges.setdefault(name, FakeExchange(name)) class FakeDriver(base.BaseDriver): def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): super(FakeDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) self._exchange_manager = FakeExchangeManager(default_exchange) def require_features(self, requeue=True): pass @staticmethod def _check_serialize(message): """Make sure a message intended for rpc can be serialized. We specifically want to use json, not our own jsonutils because jsonutils has some extra logic to automatically convert objects to primitive types so that they can be serialized. We want to catch all cases where non-primitive types make it into this code and treat it as an error. """ json.dumps(message) def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None): self._check_serialize(message) exchange = self._exchange_manager.get_exchange(target.exchange) reply_q = None if wait_for_reply: reply_q = moves.queue.Queue() exchange.deliver_message(target.topic, ctxt, message, server=target.server, fanout=target.fanout, reply_q=reply_q) if wait_for_reply: try: reply, failure = reply_q.get(timeout=timeout) if failure: raise failure else: return reply except moves.queue.Empty: raise messaging.MessagingTimeout( 'No reply on topic %s' % target.topic) return None def send(self, target, ctxt, message, wait_for_reply=None, timeout=None): return self._send(target, ctxt, message, wait_for_reply, timeout) def send_notification(self, target, ctxt, message, version): self._send(target, ctxt, message) def listen(self, target): exchange = target.exchange or self._default_exchange listener = FakeListener(self, self._exchange_manager, [messaging.Target(topic=target.topic, server=target.server, exchange=exchange), messaging.Target(topic=target.topic, exchange=exchange)]) return listener def listen_for_notifications(self, targets_and_priorities): targets = [messaging.Target(topic='%s.%s' % (target.topic, priority), exchange=target.exchange) for target, priority in targets_and_priorities] listener = FakeListener(self, self._exchange_manager, targets) return listener def cleanup(self): pass oslo.messaging-1.3.0/oslo/messaging/_drivers/__init__.py0000664000175300017540000000000112316527457024462 0ustar jenkinsjenkins00000000000000 oslo.messaging-1.3.0/oslo/messaging/_drivers/amqp.py0000664000175300017540000003031412316527457023673 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Shared code between AMQP based openstack.common.rpc implementations. The code in this module is shared between the rpc implementations based on AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses AMQP, but is deprecated and predates this code. """ import collections import logging import threading import uuid from oslo.config import cfg import six from oslo.messaging._drivers import common as rpc_common from oslo.messaging._drivers import pool # FIXME(markmc): remove this _ = lambda s: s amqp_opts = [ cfg.BoolOpt('amqp_durable_queues', default=False, deprecated_name='rabbit_durable_queues', deprecated_group='DEFAULT', help='Use durable queues in amqp.'), cfg.BoolOpt('amqp_auto_delete', default=False, help='Auto-delete queues in amqp.'), # FIXME(markmc): this was toplevel in openstack.common.rpc cfg.IntOpt('rpc_conn_pool_size', default=30, help='Size of RPC connection pool.'), ] UNIQUE_ID = '_unique_id' LOG = logging.getLogger(__name__) class ConnectionPool(pool.Pool): """Class that implements a Pool of Connections.""" def __init__(self, conf, connection_cls): self.connection_cls = connection_cls self.conf = conf super(ConnectionPool, self).__init__(self.conf.rpc_conn_pool_size) self.reply_proxy = None # TODO(comstud): Timeout connections not used in a while def create(self): LOG.debug(_('Pool creating new connection')) return self.connection_cls(self.conf) def empty(self): for item in self.iter_free(): item.close() # Force a new connection pool to be created. # Note that this was added due to failing unit test cases. The issue # is the above "while loop" gets all the cached connections from the # pool and closes them, but never returns them to the pool, a pool # leak. The unit tests hang waiting for an item to be returned to the # pool. The unit tests get here via the tearDown() method. In the run # time code, it gets here via cleanup() and only appears in service.py # just before doing a sys.exit(), so cleanup() only happens once and # the leakage is not a problem. self.connection_cls.pool = None _pool_create_sem = threading.Lock() def get_connection_pool(conf, connection_cls): with _pool_create_sem: # Make sure only one thread tries to create the connection pool. if not connection_cls.pool: connection_cls.pool = ConnectionPool(conf, connection_cls) return connection_cls.pool class ConnectionContext(rpc_common.Connection): """The class that is actually returned to the create_connection() caller. This is essentially a wrapper around Connection that supports 'with'. It can also return a new Connection, or one from a pool. The function will also catch when an instance of this class is to be deleted. With that we can return Connections to the pool on exceptions and so forth without making the caller be responsible for catching them. If possible the function makes sure to return a connection to the pool. 
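A typical use, mirroring msg_reply() below, looks like::

    with ConnectionContext(conf, connection_pool) as conn:
        conn.direct_send(msg_id, rpc_common.serialize_msg(msg))

On exit from the 'with' block the wrapped connection is reset() and
returned to the pool, or closed outright if it was not pooled.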
""" def __init__(self, conf, connection_pool, pooled=True, server_params=None): """Create a new connection, or get one from the pool.""" self.connection = None self.conf = conf self.connection_pool = connection_pool if pooled: self.connection = connection_pool.get() else: self.connection = connection_pool.connection_cls( conf, server_params=server_params) self.pooled = pooled def __enter__(self): """When with ConnectionContext() is used, return self.""" return self def _done(self): """If the connection came from a pool, clean it up and put it back. If it did not come from a pool, close it. """ if self.connection: if self.pooled: # Reset the connection so it's ready for the next caller # to grab from the pool self.connection.reset() self.connection_pool.put(self.connection) else: try: self.connection.close() except Exception: pass self.connection = None def __exit__(self, exc_type, exc_value, tb): """End of 'with' statement. We're done here.""" self._done() def __del__(self): """Caller is done with this connection. Make sure we cleaned up.""" self._done() def close(self): """Caller is done with this connection.""" self._done() def create_consumer(self, topic, proxy, fanout=False): self.connection.create_consumer(topic, proxy, fanout) def create_worker(self, topic, proxy, pool_name): self.connection.create_worker(topic, proxy, pool_name) def join_consumer_pool(self, callback, pool_name, topic, exchange_name): self.connection.join_consumer_pool(callback, pool_name, topic, exchange_name) def consume_in_thread(self): self.connection.consume_in_thread() def __getattr__(self, key): """Proxy all other calls to the Connection instance.""" if self.connection: return getattr(self.connection, key) else: raise rpc_common.InvalidRPCConnectionReuse() class ReplyProxy(ConnectionContext): """Connection class for RPC replies / callbacks.""" def __init__(self, conf, connection_pool): self._call_waiters = {} self._num_call_waiters = 0 self._num_call_waiters_wrn_threshold = 10 self._reply_q = 'reply_' + uuid.uuid4().hex super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False) self.declare_direct_consumer(self._reply_q, self._process_data) self.consume_in_thread() def _process_data(self, message_data): msg_id = message_data.pop('_msg_id', None) waiter = self._call_waiters.get(msg_id) if not waiter: LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s' ', message : %(data)s'), {'msg_id': msg_id, 'data': message_data}) LOG.warn(_('_call_waiters: %s') % str(self._call_waiters)) else: waiter.put(message_data) def add_call_waiter(self, waiter, msg_id): self._num_call_waiters += 1 if self._num_call_waiters > self._num_call_waiters_wrn_threshold: LOG.warn(_('Number of call waiters is greater than warning ' 'threshold: %d. There could be a MulticallProxyWaiter ' 'leak.') % self._num_call_waiters_wrn_threshold) self._num_call_waiters_wrn_threshold *= 2 self._call_waiters[msg_id] = waiter def del_call_waiter(self, msg_id): self._num_call_waiters -= 1 del self._call_waiters[msg_id] def get_reply_q(self): return self._reply_q def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, failure=None, ending=False, log_failure=True): """Sends a reply or an error on the channel signified by msg_id. Failure should be a sys.exc_info() tuple. 
""" with ConnectionContext(conf, connection_pool) as conn: if failure: failure = rpc_common.serialize_remote_exception(failure, log_failure) msg = {'result': reply, 'failure': failure} if ending: msg['ending'] = True _add_unique_id(msg) # If a reply_q exists, add the msg_id to the reply and pass the # reply_q to direct_send() to use it as the response queue. # Otherwise use the msg_id for backward compatibility. if reply_q: msg['_msg_id'] = msg_id conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) else: conn.direct_send(msg_id, rpc_common.serialize_msg(msg)) class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.reply_q = kwargs.pop('reply_q', None) self.conf = kwargs.pop('conf') super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['conf'] = self.conf values['msg_id'] = self.msg_id values['reply_q'] = self.reply_q return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False, connection_pool=None, log_failure=True): if self.msg_id: msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool, reply, failure, ending, log_failure) if ending: self.msg_id = None def unpack_context(conf, msg): """Unpack context from msg.""" context_dict = {} for key in list(msg.keys()): # NOTE(vish): Some versions of Python don't like unicode keys # in kwargs. key = str(key) if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value context_dict['msg_id'] = msg.pop('_msg_id', None) context_dict['reply_q'] = msg.pop('_reply_q', None) context_dict['conf'] = conf ctx = RpcContext.from_dict(context_dict) rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) return ctx def pack_context(msg, context): """Pack context into msg. Values for message keys need to be less than 255 chars, so we pull context out into a bunch of separate keys. If we want to support more arguments in rabbit messages, we may want to do the same for args at some point. """ if isinstance(context, dict): context_d = six.iteritems(context) else: context_d = six.iteritems(context.to_dict()) msg.update(('_context_%s' % key, value) for (key, value) in context_d) class _MsgIdCache(object): """This class checks any duplicate messages.""" # NOTE: This value is considered can be a configuration item, but # it is not necessary to change its value in most cases, # so let this value as static for now. DUP_MSG_CHECK_SIZE = 16 def __init__(self, **kwargs): self.prev_msgids = collections.deque([], maxlen=self.DUP_MSG_CHECK_SIZE) def check_duplicate_message(self, message_data): """AMQP consumers may read same message twice when exceptions occur before ack is returned. This method prevents doing it. 
""" try: msg_id = message_data.pop(UNIQUE_ID) except KeyError: return if msg_id in self.prev_msgids: raise rpc_common.DuplicateMessageError(msg_id=msg_id) return msg_id def add(self, msg_id): if msg_id and msg_id not in self.prev_msgids: self.prev_msgids.append(msg_id) def _add_unique_id(msg): """Add unique_id for checking duplicate messages.""" unique_id = uuid.uuid4().hex msg.update({UNIQUE_ID: unique_id}) LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) def get_control_exchange(conf): return conf.control_exchange oslo.messaging-1.3.0/oslo/messaging/_drivers/common.py0000664000175300017540000004426212316527457024234 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import logging import sys import traceback from oslo.config import cfg from oslo import messaging import six from oslo.messaging import _utils as utils from oslo.messaging.openstack.common import importutils from oslo.messaging.openstack.common import jsonutils # FIXME(markmc): remove this _ = lambda s: s LOG = logging.getLogger(__name__) _EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' '''RPC Envelope Version. This version number applies to the top level structure of messages sent out. It does *not* apply to the message payload, which must be versioned independently. For example, when using rpc APIs, a version number is applied for changes to the API being exposed over rpc. This version number is handled in the rpc proxy and dispatcher modules. This version number applies to the message envelope that is used in the serialization done inside the rpc layer. See serialize_msg() and deserialize_msg(). The current message format (version 2.0) is very simple. It is: { 'oslo.version': , 'oslo.message': } Message format version '1.0' is just considered to be the messages we sent without a message envelope. So, the current message envelope just includes the envelope version. It may eventually contain additional information, such as a signature for the message payload. We will JSON encode the application message payload. The message envelope, which includes the JSON encoded application message body, will be passed down to the messaging libraries as a dict. 
''' _RPC_ENVELOPE_VERSION = '2.0' _VERSION_KEY = 'oslo.version' _MESSAGE_KEY = 'oslo.message' _REMOTE_POSTFIX = '_Remote' _exception_opts = [ cfg.ListOpt('allowed_rpc_exception_modules', default=['oslo.messaging.exceptions', 'nova.exception', 'cinder.exception', _EXCEPTIONS_MODULE, ], help='Modules of exceptions that are permitted to be ' 'recreated upon receiving exception data from an rpc ' 'call.'), ] class RPCException(Exception): msg_fmt = _("An unknown RPC related exception occurred.") def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: try: message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in six.iteritems(kwargs): LOG.error("%s: %s" % (name, value)) # at least get the core message out if something happened message = self.msg_fmt super(RPCException, self).__init__(message) class RemoteError(RPCException): """Signifies that a remote class has raised an exception. Contains a string representation of the type of the original exception, the value of the original exception, and the traceback. These are sent to the parent as a joined string so printing the exception contains all of the relevant info. """ msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") def __init__(self, exc_type=None, value=None, traceback=None): self.exc_type = exc_type self.value = value self.traceback = traceback super(RemoteError, self).__init__(exc_type=exc_type, value=value, traceback=traceback) class Timeout(RPCException): """Signifies that a timeout has occurred. This exception is raised if the rpc_response_timeout is reached while waiting for a response from the remote side. """ msg_fmt = _('Timeout while waiting on RPC response - ' 'topic: "%(topic)s", RPC method: "%(method)s" ' 'info: "%(info)s"') def __init__(self, info=None, topic=None, method=None): """Initializes the Timeout object. :param info: Extra info to convey to the user :param topic: The topic that the rpc call was sent to :param method: The name of the rpc method being called """ self.info = info self.topic = topic self.method = method super(Timeout, self).__init__( None, info=info or _('<unknown>'), topic=topic or _('<unknown topic>'), method=method or _('<unknown method>')) class DuplicateMessageError(RPCException): msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.") class InvalidRPCConnectionReuse(RPCException): msg_fmt = _("Invalid reuse of an RPC connection.") class UnsupportedRpcVersion(RPCException): msg_fmt = _("Specified RPC version, %(version)s, not supported by " "this endpoint.") class UnsupportedRpcEnvelopeVersion(RPCException): msg_fmt = _("Specified RPC envelope version, %(version)s, " "not supported by this endpoint.") class RpcVersionCapError(RPCException): msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low") class Connection(object): """A connection, returned by rpc.create_connection(). This class represents a connection to the message bus used for rpc. An instance of this class should never be created by users of the rpc API. Use rpc.create_connection() instead. """ def close(self): """Close the connection. This method must be called when the connection will no longer be used. It will ensure that any resources associated with the connection, such as a network connection, are cleaned up. """ raise NotImplementedError() def create_consumer(self, topic, proxy, fanout=False): """Create a consumer on this connection.
A consumer is associated with a message queue on the backend message bus. The consumer will read messages from the queue, unpack them, and dispatch them to the proxy object. The contents of the message pulled off of the queue will determine which method gets called on the proxy object. :param topic: This is a name associated with what to consume from. Multiple instances of a service may consume from the same topic. For example, all instances of nova-compute consume from a queue called "compute". In that case, the messages will get distributed amongst the consumers in a round-robin fashion if fanout=False. If fanout=True, every consumer associated with this topic will get a copy of every message. :param proxy: The object that will handle all incoming messages. :param fanout: Whether or not this is a fanout topic. See the documentation for the topic parameter for some additional comments on this. """ raise NotImplementedError() def create_worker(self, topic, proxy, pool_name): """Create a worker on this connection. A worker is like a regular consumer of messages directed to a topic, except that it is part of a set of such consumers (the "pool") which may run in parallel. Every pool of workers will receive a given message, but only one worker in the pool will be asked to process it. Load is distributed across the members of the pool in round-robin fashion. :param topic: This is a name associated with what to consume from. Multiple instances of a service may consume from the same topic. :param proxy: The object that will handle all incoming messages. :param pool_name: String containing the name of the pool of workers """ raise NotImplementedError() def join_consumer_pool(self, callback, pool_name, topic, exchange_name): """Register as a member of a group of consumers. Uses given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created. :param callback: Callable to be invoked for each message. :type callback: callable accepting one argument :param pool_name: The name of the consumer pool. :type pool_name: str :param topic: The routing topic for desired messages. :type topic: str :param exchange_name: The name of the message exchange where the client should attach. Defaults to the configured exchange. :type exchange_name: str """ raise NotImplementedError() def consume_in_thread(self): """Spawn a thread to handle incoming messages. Spawn a thread that will be responsible for handling all incoming messages for consumers that were set up on this connection. Message dispatching inside of this is expected to be implemented in a non-blocking manner. An example implementation would be having this thread pull messages in for all of the consumers, but utilize a thread pool for dispatching the messages to the proxy objects. """ raise NotImplementedError() def _safe_log(log_func, msg, msg_data): """Sanitizes the msg_data field before logging.""" SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass'] def _fix_passwords(d): """Sanitizes the password fields in the dictionary.""" for k in six.iterkeys(d): if k.lower().find('password') != -1: d[k] = '<SANITIZED>' elif k.lower() in SANITIZE: d[k] = '<SANITIZED>' elif isinstance(d[k], dict): _fix_passwords(d[k]) return d return log_func(msg, _fix_passwords(copy.deepcopy(msg_data))) def serialize_remote_exception(failure_info, log_failure=True): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple.
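Illustrative use on the server side::

    try:
        raise ValueError('boom')
    except Exception:
        json_data = serialize_remote_exception(sys.exc_info(),
                                               log_failure=False)

The returned JSON document carries 'class', 'module', 'message',
'tb', 'args' and 'kwargs' entries, as assembled below.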
""" tb = traceback.format_exception(*failure_info) failure = failure_info[1] if log_failure: LOG.error(_("Returning exception %s to caller"), six.text_type(failure)) LOG.error(tb) kwargs = {} if hasattr(failure, 'kwargs'): kwargs = failure.kwargs # NOTE(matiu): With cells, it's possible to re-raise remote, remote # exceptions. Lets turn it back into the original exception type. cls_name = str(failure.__class__.__name__) mod_name = str(failure.__class__.__module__) if (cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX)): cls_name = cls_name[:-len(_REMOTE_POSTFIX)] mod_name = mod_name[:-len(_REMOTE_POSTFIX)] data = { 'class': cls_name, 'module': mod_name, 'message': six.text_type(failure), 'tb': tb, 'args': failure.args, 'kwargs': kwargs } json_data = jsonutils.dumps(data) return json_data def deserialize_remote_exception(data, allowed_remote_exmods): failure = jsonutils.loads(str(data)) trace = failure.get('tb', []) message = failure.get('message', "") + "\n" + "\n".join(trace) name = failure.get('class') module = failure.get('module') # NOTE(ameade): We DO NOT want to allow just any module to be imported, in # order to prevent arbitrary code execution. if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods: return messaging.RemoteError(name, failure.get('message'), trace) try: mod = importutils.import_module(module) klass = getattr(mod, name) if not issubclass(klass, Exception): raise TypeError("Can only deserialize Exceptions") failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) except (AttributeError, TypeError, ImportError): return messaging.RemoteError(name, failure.get('message'), trace) ex_type = type(failure) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), {'__str__': str_override, '__unicode__': str_override}) new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) try: # NOTE(ameade): Dynamically create a new exception type and swap it in # as the new type for the exception. This only works on user defined # Exceptions and not core Python exceptions. This is important because # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type except TypeError: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. failure.args = (message,) + failure.args[1:] return failure class CommonRpcContext(object): def __init__(self, **kwargs): self.values = kwargs def __getattr__(self, key): try: return self.values[key] except KeyError: raise AttributeError(key) def to_dict(self): return copy.deepcopy(self.values) @classmethod def from_dict(cls, values): return cls(**values) def deepcopy(self): return self.from_dict(self.to_dict()) def update_store(self): #local.store.context = self pass def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin flag set.""" # TODO(russellb) This method is a bit of a nova-ism. It makes # some assumptions about the data in the request context sent # across rpc, while the rest of this class does not. 
We could get # rid of this if we changed the nova code that uses this to # convert the RpcContext back to its native RequestContext doing # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) context = self.deepcopy() context.values['is_admin'] = True context.values.setdefault('roles', []) if 'admin' not in context.values['roles']: context.values['roles'].append('admin') if read_deleted is not None: context.values['read_deleted'] = read_deleted return context class ClientException(Exception): """Encapsulates actual exception expected to be hit by a RPC proxy object. Merely instantiating it records the current exception information, which will be passed back to the RPC client without exceptional logging. """ def __init__(self): self._exc_info = sys.exc_info() def catch_client_exception(exceptions, func, *args, **kwargs): try: return func(*args, **kwargs) except Exception as e: if type(e) in exceptions: raise ClientException() else: raise def client_exceptions(*exceptions): """Decorator for manager methods that raise expected exceptions. Marking a Manager method with this decorator allows the declaration of expected exceptions that the RPC layer should not consider fatal, and not log as if they were generated in a real error scenario. Note that this will cause listed exceptions to be wrapped in a ClientException, which is used internally by the RPC layer. """ def outer(func): def inner(*args, **kwargs): return catch_client_exception(exceptions, func, *args, **kwargs) return inner return outer def serialize_msg(raw_msg): # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more # information about this format. msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, _MESSAGE_KEY: jsonutils.dumps(raw_msg)} return msg def deserialize_msg(msg): # NOTE(russellb): Hang on to your hats, this road is about to # get a little bumpy. # # Robustness Principle: # "Be strict in what you send, liberal in what you accept." # # At this point we have to do a bit of guessing about what it # is we just received. Here is the set of possibilities: # # 1) We received a dict. This could be 2 things: # # a) Inspect it to see if it looks like a standard message envelope. # If so, great! # # b) If it doesn't look like a standard message envelope, it could either # be a notification, or a message from before we added a message # envelope (referred to as version 1.0). # Just return the message as-is. # # 2) It's any other non-dict type. Just return it and hope for the best. # This case covers return values from rpc.call() from before message # envelopes were used. (messages to call a method were always a dict) if not isinstance(msg, dict): # See #2 above. return msg base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) if not all(map(lambda key: key in msg, base_envelope_keys)): # See #1.b above. return msg # At this point we think we have the message envelope # format we were expecting. (#1.a above) if not utils.version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) return raw_msg oslo.messaging-1.3.0/oslo/messaging/_drivers/matchmaker_ring.py0000664000175300017540000000662712316527457026102 0ustar jenkinsjenkins00000000000000# Copyright 2011-2013 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The MatchMaker classes should accept a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ import itertools import json import logging from oslo.config import cfg from oslo.messaging._drivers import matchmaker as mm # FIXME(markmc): remove this _ = lambda s: s matchmaker_opts = [ # Matchmaker ring file cfg.StrOpt('ringfile', deprecated_name='matchmaker_ringfile', deprecated_group='DEFAULT', default='/etc/oslo/matchmaker_ring.json', help='Matchmaker ring file (JSON).'), ] CONF = cfg.CONF CONF.register_opts(matchmaker_opts, 'matchmaker_ring') LOG = logging.getLogger(__name__) class RingExchange(mm.Exchange): """Match Maker where hosts are loaded from a static JSON formatted file. __init__ takes optional ring dictionary argument, otherwise loads the ringfile from CONF.matchmaker_ring.ringfile. """ def __init__(self, ring=None): super(RingExchange, self).__init__() if ring: self.ring = ring else: fh = open(CONF.matchmaker_ring.ringfile, 'r') self.ring = json.load(fh) fh.close() self.ring0 = {} for k in self.ring.keys(): self.ring0[k] = itertools.cycle(self.ring[k]) def _ring_has(self, key): return key in self.ring0 class RoundRobinRingExchange(RingExchange): """A Topic Exchange based on a hashmap.""" def __init__(self, ring=None): super(RoundRobinRingExchange, self).__init__(ring) def run(self, key): if not self._ring_has(key): LOG.warn( _("No key defining hosts for topic '%s', " "see ringfile") % (key, ) ) return [] host = next(self.ring0[key]) return [(key + '.' + host, host)] class FanoutRingExchange(RingExchange): """Fanout Exchange based on a hashmap.""" def __init__(self, ring=None): super(FanoutRingExchange, self).__init__(ring) def run(self, key): # Assume starts with "fanout~", strip it for lookup. nkey = key.split('fanout~')[1:][0] if not self._ring_has(nkey): LOG.warn( _("No key defining hosts for topic '%s', " "see ringfile") % (nkey, ) ) return [] return map(lambda x: (key + '.' + x, x), self.ring[nkey]) class MatchMakerRing(mm.MatchMakerBase): """Match Maker where hosts are loaded from a static hashmap.""" def __init__(self, ring=None): super(MatchMakerRing, self).__init__() self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring)) self.add_binding(mm.DirectBinding(), mm.DirectExchange()) self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring)) oslo.messaging-1.3.0/oslo/messaging/_drivers/impl_zmq.py0000664000175300017540000007622112316527457024574 0ustar jenkinsjenkins00000000000000# Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
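# An example ring file for the matchmaker_ring module above (host
# names are hypothetical):
#
#     {
#         "compute": ["host1", "host2"],
#         "cert": ["host1"]
#     }
#
# With that ring, RoundRobinRingExchange.run('compute') alternates
# between [('compute.host1', 'host1')] and [('compute.host2', 'host2')],
# while FanoutRingExchange.run('fanout~compute') returns both hosts at
# once.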
import collections import logging import os import pprint import re import socket import sys import threading import types import uuid import eventlet import greenlet from oslo.config import cfg import six from six import moves from oslo.messaging._drivers import base from oslo.messaging._drivers import common as rpc_common from oslo.messaging._executors import impl_eventlet # FIXME(markmc) from oslo.messaging.openstack.common import excutils from oslo.messaging.openstack.common import importutils from oslo.messaging.openstack.common import jsonutils zmq = importutils.try_import('eventlet.green.zmq') # for convenience, are not modified. pformat = pprint.pformat Timeout = eventlet.timeout.Timeout LOG = logging.getLogger(__name__) RemoteError = rpc_common.RemoteError RPCException = rpc_common.RPCException # FIXME(markmc): remove this _ = lambda s: s zmq_opts = [ cfg.StrOpt('rpc_zmq_bind_address', default='*', help='ZeroMQ bind address. Should be a wildcard (*), ' 'an ethernet interface, or IP. ' 'The "host" option should point or resolve to this ' 'address.'), # The module.Class to use for matchmaking. cfg.StrOpt( 'rpc_zmq_matchmaker', default=('oslo.messaging._drivers.' 'matchmaker.MatchMakerLocalhost'), help='MatchMaker driver.', ), # The following port is unassigned by IANA as of 2012-05-21 cfg.IntOpt('rpc_zmq_port', default=9501, help='ZeroMQ receiver listening port.'), cfg.IntOpt('rpc_zmq_contexts', default=1, help='Number of ZeroMQ contexts, defaults to 1.'), cfg.IntOpt('rpc_zmq_topic_backlog', default=None, help='Maximum number of ingress messages to locally buffer ' 'per topic. Default is unlimited.'), cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', help='Directory for holding IPC sockets.'), cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), help='Name of this node. Must be a valid hostname, FQDN, or ' 'IP address. Must match "host" option, if running Nova.'), cfg.IntOpt('rpc_cast_timeout', default=30, help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), ] CONF = cfg.CONF ZMQ_CTX = None # ZeroMQ Context, must be global. matchmaker = None # memoized matchmaker object def _serialize(data): """Serialization wrapper. We prefer using JSON, but it cannot encode all types. Error if a developer passes us bad data. """ try: return jsonutils.dumps(data, ensure_ascii=True) except TypeError: with excutils.save_and_reraise_exception(): LOG.error(_("JSON serialization failed.")) def _deserialize(data): """Deserialization wrapper.""" LOG.debug(_("Deserializing: %s"), data) return jsonutils.loads(data) class ZmqSocket(object): """A tiny wrapper around ZeroMQ. Simplifies the send/recv protocol and connection management. Can be used as a Context (supports the 'with' statement). """ def __init__(self, addr, zmq_type, bind=True, subscribe=None): self.sock = _get_ctxt().socket(zmq_type) self.addr = addr self.type = zmq_type self.subscriptions = [] # Support failures on sending/receiving on wrong socket type. 
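# PULL and SUB sockets are receive-only, PUSH and PUB are send-only,
# and only SUB sockets carry subscriptions; recv(), send() and
# subscribe() below raise RPCException when invoked on a socket type
# that does not support the operation.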
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) self.can_sub = zmq_type in (zmq.SUB, ) # Support list, str, & None for subscribe arg (cast to list) do_sub = { list: subscribe, str: [subscribe], type(None): [] }[type(subscribe)] for f in do_sub: self.subscribe(f) str_data = {'addr': addr, 'type': self.socket_s(), 'subscribe': subscribe, 'bind': bind} LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) LOG.debug(_("-> bind: %(bind)s"), str_data) try: if bind: self.sock.bind(addr) else: self.sock.connect(addr) except Exception: raise RPCException(_("Could not open socket.")) def socket_s(self): """Get socket type as string.""" t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', 'DEALER') return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] def subscribe(self, msg_filter): """Subscribe.""" if not self.can_sub: raise RPCException("Cannot subscribe on this socket.") LOG.debug(_("Subscribing to %s"), msg_filter) try: self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) except Exception: return self.subscriptions.append(msg_filter) def unsubscribe(self, msg_filter): """Unsubscribe.""" if msg_filter not in self.subscriptions: return self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) self.subscriptions.remove(msg_filter) def close(self): if self.sock is None or self.sock.closed: return # We must unsubscribe, or we'll leak descriptors. if self.subscriptions: for f in self.subscriptions: try: self.sock.setsockopt(zmq.UNSUBSCRIBE, f) except Exception: pass self.subscriptions = [] try: # Default is to linger self.sock.close() except Exception: # While this is a bad thing to happen, # it would be much worse if some of the code calling this # were to fail. For now, lets log, and later evaluate # if we can safely raise here. 
LOG.error("ZeroMQ socket could not be closed.") self.sock = None def recv(self, **kwargs): if not self.can_recv: raise RPCException(_("You cannot recv on this socket.")) return self.sock.recv_multipart(**kwargs) def send(self, data, **kwargs): if not self.can_send: raise RPCException(_("You cannot send on this socket.")) self.sock.send_multipart(data, **kwargs) class ZmqClient(object): """Client for ZMQ sockets.""" def __init__(self, addr): self.outq = ZmqSocket(addr, zmq.PUSH, bind=False) def cast(self, msg_id, topic, data, envelope): msg_id = msg_id or 0 if not envelope: self.outq.send(map(bytes, (msg_id, topic, 'cast', _serialize(data)))) return rpc_envelope = rpc_common.serialize_msg(data[1], envelope) zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items()) self.outq.send(map(bytes, (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) def close(self): self.outq.close() class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.replies = [] super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['replies'] = self.replies return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False): if ending: return self.replies.append(reply) @classmethod def marshal(self, ctx): ctx_data = ctx.to_dict() return _serialize(ctx_data) @classmethod def unmarshal(self, data): return RpcContext.from_dict(_deserialize(data)) class InternalContext(object): """Used by ConsumerBase as a private context for - methods.""" def __init__(self, proxy): self.proxy = proxy self.msg_waiter = None def _get_response(self, ctx, proxy, topic, data): """Process a curried message and cast the result to topic.""" LOG.debug(_("Running func with context: %s"), ctx.to_dict()) data.setdefault('version', None) data.setdefault('args', {}) try: result = proxy.dispatch( ctx, data['version'], data['method'], data.get('namespace'), **data['args']) return ConsumerBase.normalize_reply(result, ctx.replies) except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass except rpc_common.ClientException as e: LOG.debug(_("Expected exception during message handling (%s)") % e._exc_info[1]) return {'exc': rpc_common.serialize_remote_exception(e._exc_info, log_failure=False)} except Exception: LOG.error(_("Exception during message handling")) return {'exc': rpc_common.serialize_remote_exception(sys.exc_info())} def reply(self, ctx, proxy, msg_id=None, context=None, topic=None, msg=None): """Reply to a casted call.""" # NOTE(ewindisch): context kwarg exists for Grizzly compat. # this may be able to be removed earlier than # 'I' if ConsumerBase.process were refactored. if type(msg) is list: payload = msg[-1] else: payload = msg response = ConsumerBase.normalize_reply( self._get_response(ctx, proxy, topic, payload), ctx.replies) LOG.debug(_("Sending reply")) _multi_send(_cast, ctx, topic, { 'method': '-process_reply', 'args': { 'msg_id': msg_id, # Include for Folsom compat. 'response': response } }, _msg_id=msg_id) class ConsumerBase(object): """Base Consumer.""" def __init__(self): self.private_ctx = InternalContext(None) @classmethod def normalize_reply(self, result, replies): #TODO(ewindisch): re-evaluate and document this method. 
if isinstance(result, types.GeneratorType): return list(result) elif replies: return replies else: return [result] def process(self, proxy, ctx, data): data.setdefault('version', None) data.setdefault('args', {}) # Method starting with - are # processed internally. (non-valid method name) method = data.get('method') if not method: LOG.error(_("RPC message did not include method.")) return # Internal method # uses internal context for safety. if method == '-reply': self.private_ctx.reply(ctx, proxy, **data['args']) return proxy.dispatch(ctx, data['version'], data['method'], data.get('namespace'), **data['args']) class ZmqBaseReactor(ConsumerBase): """A consumer class implementing a centralized casting broker (PULL-PUSH). Used for RoundRobin requests. """ def __init__(self, conf): super(ZmqBaseReactor, self).__init__() self.proxies = {} self.threads = [] self.sockets = [] self.subscribe = {} self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) def register(self, proxy, in_addr, zmq_type_in, in_bind=True, subscribe=None): LOG.info(_("Registering reactor")) if zmq_type_in not in (zmq.PULL, zmq.SUB): raise RPCException("Bad input socktype") # Items push in. inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, subscribe=subscribe) self.proxies[inq] = proxy self.sockets.append(inq) LOG.info(_("In reactor registered")) def consume_in_thread(self): def _consume(sock): LOG.info(_("Consuming socket")) while True: self.consume(sock) for k in self.proxies.keys(): self.threads.append( self.pool.spawn(_consume, k) ) def wait(self): for t in self.threads: t.wait() def close(self): for s in self.sockets: s.close() for t in self.threads: t.kill() class ZmqProxy(ZmqBaseReactor): """A consumer class implementing a topic-based proxy. Forwards to IPC sockets. """ def __init__(self, conf): super(ZmqProxy, self).__init__(conf) pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\')) self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep))) self.topic_proxy = {} def consume(self, sock): ipc_dir = CONF.rpc_zmq_ipc_dir data = sock.recv(copy=False) topic = data[1].bytes if topic.startswith('fanout~'): sock_type = zmq.PUB topic = topic.split('.', 1)[0] elif topic.startswith('zmq_replies'): sock_type = zmq.PUB else: sock_type = zmq.PUSH if topic not in self.topic_proxy: def publisher(waiter): LOG.info(_("Creating proxy for topic: %s"), topic) try: # The topic is received over the network, # don't trust this input. if self.badchars.search(topic) is not None: emsg = _("Topic contained dangerous characters.") LOG.warn(emsg) raise RPCException(emsg) out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), sock_type, bind=True) except RPCException: waiter.send_exception(*sys.exc_info()) return self.topic_proxy[topic] = eventlet.queue.LightQueue( CONF.rpc_zmq_topic_backlog) self.sockets.append(out_sock) # It takes some time for a pub socket to open, # before we can have any faith in doing a send() to it. if sock_type == zmq.PUB: eventlet.sleep(.5) waiter.send(True) while(True): data = self.topic_proxy[topic].get() out_sock.send(data, copy=False) wait_sock_creation = eventlet.event.Event() eventlet.spawn(publisher, wait_sock_creation) try: wait_sock_creation.wait() except RPCException: LOG.error(_("Topic socket file creation failed.")) return try: self.topic_proxy[topic].put_nowait(data) except eventlet.queue.Full: LOG.error(_("Local per-topic backlog buffer full for topic " "%(topic)s. 
Dropping message.") % {'topic': topic}) def consume_in_thread(self): """Runs the ZmqProxy service.""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) try: os.makedirs(ipc_dir) except os.error: if not os.path.isdir(ipc_dir): with excutils.save_and_reraise_exception(): LOG.error(_("Required IPC directory does not exist at" " %s") % (ipc_dir, )) try: self.register(consumption_proxy, consume_in, zmq.PULL) except zmq.ZMQError: if os.access(ipc_dir, os.X_OK): with excutils.save_and_reraise_exception(): LOG.error(_("Permission denied to IPC directory at" " %s") % (ipc_dir, )) with excutils.save_and_reraise_exception(): LOG.error(_("Could not create ZeroMQ receiver daemon. " "Socket may already be in use.")) super(ZmqProxy, self).consume_in_thread() def unflatten_envelope(packenv): """Unflattens the RPC envelope. Takes a list and returns a dictionary. i.e. [1,2,3,4] => {1: 2, 3: 4} """ i = iter(packenv) h = {} try: while True: k = six.next(i) h[k] = six.next(i) except StopIteration: return h class ZmqReactor(ZmqBaseReactor): """A consumer class implementing a consumer for messages. Can also be used as a 1:1 proxy """ def __init__(self, conf): super(ZmqReactor, self).__init__(conf) def consume(self, sock): #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) proxy = self.proxies[sock] if data[2] == 'cast': # Legacy protocol packenv = data[3] ctx, msg = _deserialize(packenv) request = rpc_common.deserialize_msg(msg) ctx = RpcContext.unmarshal(ctx) elif data[2] == 'impl_zmq_v2': packenv = data[4:] msg = unflatten_envelope(packenv) request = rpc_common.deserialize_msg(msg) # Unmarshal only after verifying the message. ctx = RpcContext.unmarshal(data[3]) else: LOG.error(_("ZMQ Envelope version unsupported or unknown.")) return self.pool.spawn_n(self.process, proxy, ctx, request) class Connection(rpc_common.Connection): """Manages connections and threads.""" def __init__(self, conf): self.topics = [] self.reactor = ZmqReactor(conf) def create_consumer(self, topic, proxy, fanout=False): # Register with matchmaker. _get_matchmaker().register(topic, CONF.rpc_zmq_host) # Subscription scenarios if fanout: sock_type = zmq.SUB subscribe = ('', fanout)[type(fanout) == str] topic = 'fanout~' + topic.split('.', 1)[0] else: sock_type = zmq.PULL subscribe = None topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) if topic in self.topics: LOG.info(_("Skipping topic registration. 
Already registered.")) return # Receive messages from (local) proxy inaddr = "ipc://%s/zmq_topic_%s" % \ (CONF.rpc_zmq_ipc_dir, topic) LOG.debug(_("Consumer is a zmq.%s"), ['PULL', 'SUB'][sock_type == zmq.SUB]) self.reactor.register(proxy, inaddr, sock_type, subscribe=subscribe, in_bind=False) self.topics.append(topic) def close(self): _get_matchmaker().stop_heartbeat() for topic in self.topics: _get_matchmaker().unregister(topic, CONF.rpc_zmq_host) self.reactor.close() self.topics = [] def wait(self): self.reactor.wait() def consume_in_thread(self): _get_matchmaker().start_heartbeat() self.reactor.consume_in_thread() def _cast(addr, context, topic, msg, timeout=None, envelope=False, _msg_id=None, allowed_remote_exmods=[]): timeout_cast = timeout or CONF.rpc_cast_timeout payload = [RpcContext.marshal(context), msg] with Timeout(timeout_cast, exception=rpc_common.Timeout): try: conn = ZmqClient(addr) # assumes cast can't return an exception conn.cast(_msg_id, topic, payload, envelope) except zmq.ZMQError: raise RPCException("Cast failed. ZMQ Socket Exception") finally: if 'conn' in vars(): conn.close() def _call(addr, context, topic, msg, timeout=None, envelope=False, allowed_remote_exmods=[]): # timeout_response is how long we wait for a response timeout = timeout or CONF.rpc_response_timeout # The msg_id is used to track replies. msg_id = uuid.uuid4().hex # Replies always come into the reply service. reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host LOG.debug(_("Creating payload")) # Curry the original request into a reply method. mcontext = RpcContext.marshal(context) payload = { 'method': '-reply', 'args': { 'msg_id': msg_id, 'topic': reply_topic, # TODO(ewindisch): safe to remove mcontext in I. 'msg': [mcontext, msg] } } LOG.debug(_("Creating queue socket for reply waiter")) # Messages arriving async. # TODO(ewindisch): have reply consumer with dynamic subscription mgmt with Timeout(timeout, exception=rpc_common.Timeout): try: msg_waiter = ZmqSocket( "ipc://%s/zmq_topic_zmq_replies.%s" % (CONF.rpc_zmq_ipc_dir, CONF.rpc_zmq_host), zmq.SUB, subscribe=msg_id, bind=False ) LOG.debug(_("Sending cast")) _cast(addr, context, topic, payload, envelope) LOG.debug(_("Cast sent; Waiting reply")) # Blocks until receives reply msg = msg_waiter.recv() LOG.debug(_("Received message: %s"), msg) LOG.debug(_("Unpacking response")) if msg[2] == 'cast': # Legacy version raw_msg = _deserialize(msg[-1])[-1] elif msg[2] == 'impl_zmq_v2': rpc_envelope = unflatten_envelope(msg[4:]) raw_msg = rpc_common.deserialize_msg(rpc_envelope) else: raise rpc_common.UnsupportedRpcEnvelopeVersion( _("Unsupported or unknown ZMQ envelope returned.")) responses = raw_msg['args']['response'] # ZMQError trumps the Timeout error. except zmq.ZMQError: raise RPCException("ZMQ Socket Error") except (IndexError, KeyError): raise RPCException(_("RPC Message Invalid.")) finally: if 'msg_waiter' in vars(): msg_waiter.close() # It seems we don't need to do all of the following, # but perhaps it would be useful for multicall? # One effect of this is that we're checking all # responses for Exceptions. for resp in responses: if isinstance(resp, types.DictType) and 'exc' in resp: raise rpc_common.deserialize_remote_exception( resp['exc'], allowed_remote_exmods) return responses[-1] def _multi_send(method, context, topic, msg, timeout=None, envelope=False, _msg_id=None, allowed_remote_exmods=[]): """Wraps the sending of messages. Dispatches to the matchmaker and sends message to all relevant hosts. 
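For illustration, the matchmaker might resolve 'compute' to::

    [('compute.host1', 'host1'), ('compute.host2', 'host2')]

Each entry is then turned into an address such as 'tcp://host1:9501'
(given the default rpc_zmq_port) before the message is handed to
_cast or _call.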
""" conf = CONF LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) queues = _get_matchmaker().queues(topic) LOG.debug(_("Sending message(s) to: %s"), queues) # Don't stack if we have no matchmaker results if not queues: LOG.warn(_("No matchmaker results. Not casting.")) # While not strictly a timeout, callers know how to handle # this exception and a timeout isn't too big a lie. raise rpc_common.Timeout(_("No match from matchmaker.")) # This supports brokerless fanout (addresses > 1) for queue in queues: (_topic, ip_addr) = queue _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) if method.__name__ == '_cast': eventlet.spawn_n(method, _addr, context, _topic, msg, timeout, envelope, _msg_id) return return method(_addr, context, _topic, msg, timeout, envelope, allowed_remote_exmods) def create_connection(conf, new=True): return Connection(conf) def multicall(conf, *args, **kwargs): """Multiple calls.""" return _multi_send(_call, *args, **kwargs) def call(conf, *args, **kwargs): """Send a message, expect a response.""" data = _multi_send(_call, *args, **kwargs) return data[-1] def cast(conf, *args, **kwargs): """Send a message expecting no reply.""" _multi_send(_cast, *args, **kwargs) def fanout_cast(conf, context, topic, msg, **kwargs): """Send a message to all listening and expect no reply.""" # NOTE(ewindisch): fanout~ is used because it avoid splitting on . # and acts as a non-subtle hint to the matchmaker and ZmqProxy. _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) def notify(conf, context, topic, msg, envelope): """Send notification event. Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. 
topic = topic.replace('.', '-') cast(conf, context, topic, msg, envelope=envelope) def cleanup(): """Clean up resources in use by implementation.""" global ZMQ_CTX if ZMQ_CTX: ZMQ_CTX.term() ZMQ_CTX = None global matchmaker matchmaker = None def _get_ctxt(): if not zmq: raise ImportError("Failed to import eventlet.green.zmq") global ZMQ_CTX if not ZMQ_CTX: ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts) return ZMQ_CTX def _get_matchmaker(*args, **kwargs): global matchmaker if not matchmaker: mm = CONF.rpc_zmq_matchmaker if mm.endswith('matchmaker.MatchMakerRing'): mm.replace('matchmaker', 'matchmaker_ring') LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use' ' %(new)s instead') % dict( orig=CONF.rpc_zmq_matchmaker, new=mm)) matchmaker = importutils.import_object(mm, *args, **kwargs) return matchmaker class ZmqIncomingMessage(base.IncomingMessage): ReceivedReply = collections.namedtuple( 'ReceivedReply', ['reply', 'failure', 'log_failure']) def __init__(self, listener, ctxt, message): super(ZmqIncomingMessage, self).__init__(listener, ctxt, message) self.condition = threading.Condition() self.received = None def reply(self, reply=None, failure=None, log_failure=True): self.received = self.ReceivedReply(reply, failure, log_failure) with self.condition: self.condition.notify() def requeue(self): pass class ZmqListener(base.Listener): def __init__(self, driver): super(ZmqListener, self).__init__(driver) self.incoming_queue = moves.queue.Queue() def dispatch(self, ctxt, version, method, namespace, **kwargs): message = { 'method': method, 'args': kwargs } if version: message['version'] = version if namespace: message['namespace'] = namespace incoming = ZmqIncomingMessage(self, ctxt.to_dict(), message) self.incoming_queue.put(incoming) with incoming.condition: incoming.condition.wait() assert incoming.received if incoming.received.failure: raise incoming.received.failure else: return incoming.received.reply def poll(self): while True: return self.incoming_queue.get() class ZmqDriver(base.BaseDriver): # FIXME(markmc): allow this driver to be used without eventlet def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): conf.register_opts(zmq_opts) conf.register_opts(impl_eventlet._eventlet_opts) super(ZmqDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) # FIXME(markmc): handle default_exchange # FIXME(markmc): handle transport URL if self._url.hosts: raise NotImplementedError('The ZeroMQ driver does not yet support ' 'transport URLs') # FIXME(markmc): use self.conf everywhere if self.conf is not CONF: raise NotImplementedError('The ZeroMQ driver currently only works ' 'with oslo.config.cfg.CONF') def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, envelope=True): # FIXME(markmc): remove this temporary hack class Context(object): def __init__(self, d): self.d = d def to_dict(self): return self.d context = Context(ctxt) if wait_for_reply: method = _call else: method = _cast topic = target.topic if target.fanout: # NOTE(ewindisch): fanout~ is used because it avoid splitting on # and acts as a non-subtle hint to the matchmaker and ZmqProxy. 
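# For example, target.topic 'compute' becomes 'fanout~compute'; the
# fanout exchanges (e.g. FanoutRingExchange, RedisFanoutExchange)
# strip the prefix again when resolving hosts.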
topic = 'fanout~' + topic reply = _multi_send(method, context, topic, message, envelope=envelope, allowed_remote_exmods=self._allowed_remote_exmods) if wait_for_reply: return reply[-1] def send(self, target, ctxt, message, wait_for_reply=None, timeout=None): return self._send(target, ctxt, message, wait_for_reply, timeout) def send_notification(self, target, ctxt, message, version): # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. target = target(topic=target.topic.replace('.', '-')) return self._send(target, ctxt, message, envelope=(version == 2.0)) def listen(self, target): conn = create_connection(self.conf) listener = ZmqListener(self) conn.create_consumer(target.topic, listener) conn.create_consumer('%s.%s' % (target.topic, target.server), listener) conn.create_consumer(target.topic, listener, fanout=True) conn.consume_in_thread() return listener def listen_for_notifications(self, targets_and_priorities): # NOTE(sileht): this listener implementation is limited # because zeromq doesn't support requeuing messages conn = create_connection(self.conf) listener = ZmqListener(self) for target, priority in targets_and_priorities: # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. # NOTE(sileht): create_consumer doesn't support target.exchange conn.create_consumer('%s-%s' % (target.topic, priority), listener) conn.consume_in_thread() return listener def cleanup(self): cleanup() oslo.messaging-1.3.0/oslo/messaging/_drivers/matchmaker_redis.py0000664000175300017540000001117312316527457026241 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The MatchMaker classes should accept a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ from oslo.config import cfg from oslo.messaging._drivers import matchmaker as mm_common from oslo.messaging.openstack.common import importutils redis = importutils.try_import('redis') matchmaker_redis_opts = [ cfg.StrOpt('host', default='127.0.0.1', help='Host to locate redis.'), cfg.IntOpt('port', default=6379, help='Use this port to connect to redis host.'), cfg.StrOpt('password', default=None, help='Password for Redis server (optional).'), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='matchmaker_redis', title='Options for Redis-based MatchMaker') CONF.register_group(opt_group) CONF.register_opts(matchmaker_redis_opts, opt_group) class RedisExchange(mm_common.Exchange): def __init__(self, matchmaker): self.matchmaker = matchmaker self.redis = matchmaker.redis super(RedisExchange, self).__init__() class RedisTopicExchange(RedisExchange): """Exchange where all topic keys are split, sending to the second half. i.e. "compute.host" sends a message to "compute" running on "host" """ def run(self, topic): while True: member_name = self.redis.srandmember(topic) if not member_name: # If this happens, there are no # longer any members.
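# (Illustrative aside, not part of the original module.) Members of the
# Redis set for a topic are '<topic>.<host>' strings, so one random member
# yields both the direct exchange key and the host to send to:
#
#     >>> 'compute.host1'.split('.', 1)[1]
#     'host1'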
break if not self.matchmaker.is_alive(topic, member_name): continue host = member_name.split('.', 1)[1] return [(member_name, host)] return [] class RedisFanoutExchange(RedisExchange): """Return a list of all hosts.""" def run(self, topic): topic = topic.split('~', 1)[1] hosts = self.redis.smembers(topic) good_hosts = filter( lambda host: self.matchmaker.is_alive(topic, host), hosts) return [(x, x.split('.', 1)[1]) for x in good_hosts] class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): """MatchMaker registering and looking-up hosts with a Redis server.""" def __init__(self): super(MatchMakerRedis, self).__init__() if not redis: raise ImportError("Failed to import module redis.") self.redis = redis.StrictRedis( host=CONF.matchmaker_redis.host, port=CONF.matchmaker_redis.port, password=CONF.matchmaker_redis.password) self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self)) self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange()) self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self)) def ack_alive(self, key, host): topic = "%s.%s" % (key, host) if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl): # If we could not update the expiration, the key # might have been pruned. Re-register, creating a new # key in Redis. self.register(self.topic_host[host], host) def is_alive(self, topic, host): if self.redis.ttl(host) == -1: self.expire(topic, host) return False return True def expire(self, topic, host): with self.redis.pipeline() as pipe: pipe.multi() pipe.delete(host) pipe.srem(topic, host) pipe.execute() def backend_register(self, key, key_host): with self.redis.pipeline() as pipe: pipe.multi() pipe.sadd(key, key_host) # No value is needed, we just # care if it exists. Sets aren't viable # because only keys can expire. pipe.set(key_host, '') pipe.execute() def backend_unregister(self, key, key_host): with self.redis.pipeline() as pipe: pipe.multi() pipe.srem(key, key_host) pipe.delete(key_host) pipe.execute() oslo.messaging-1.3.0/oslo/messaging/__init__.py0000664000175300017540000000143512316527457022661 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from .exceptions import * from .localcontext import * from .notify import * from .rpc import * from .serializer import * from .server import * from .target import * from .transport import * oslo.messaging-1.3.0/oslo/messaging/notify/0000775000175300017540000000000012316527535022052 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/notify/_impl_noop.py0000664000175300017540000000145312316527457024565 0ustar jenkinsjenkins00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
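# (Illustrative aside, not part of the original package; the key names are
# hypothetical.) The MatchMakerRedis registration above maintains one set
# per binding key plus one expiring string key per host, so a redis-cli
# session after register('compute', 'compute.host1') might show:
#
#     SMEMBERS compute     -> {'compute.host1'}
#     GET compute.host1    -> ''  (exists only as an expirable liveness marker)
#     TTL compute.host1    -> matchmaker_heartbeat_ttl, refreshed by ack_alive()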
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.messaging.notify import notifier class NoOpDriver(notifier._Driver): def notify(self, ctxt, message, priority): pass oslo.messaging-1.3.0/oslo/messaging/notify/_impl_messaging.py0000664000175300017540000000414412316527457025567 0ustar jenkinsjenkins00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo import messaging from oslo.messaging.notify import notifier LOG = logging.getLogger(__name__) class MessagingDriver(notifier._Driver): """Send notifications using the 1.0 message format. This driver sends notifications over the configured messaging transport, but without any message envelope (also known as message format 1.0). This driver should only be used in cases where there are existing consumers deployed which do not support the 2.0 message format. """ def __init__(self, conf, topics, transport, version=1.0): super(MessagingDriver, self).__init__(conf, topics, transport) self.version = version def notify(self, ctxt, message, priority): priority = priority.lower() for topic in self.topics: target = messaging.Target(topic='%s.%s' % (topic, priority)) try: self.transport._send_notification(target, ctxt, message, version=self.version) except Exception: LOG.exception("Could not send notification to %(topic)s. " "Payload=%(message)s", dict(topic=topic, message=message)) class MessagingV2Driver(MessagingDriver): "Send notifications using the 2.0 message format." def __init__(self, conf, **kwargs): super(MessagingV2Driver, self).__init__(conf, version=2.0, **kwargs) oslo.messaging-1.3.0/oslo/messaging/notify/_impl_log.py0000664000175300017540000000232312316527457024370 0ustar jenkinsjenkins00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo.messaging.notify import notifier from oslo.messaging.openstack.common import jsonutils class LogDriver(notifier._Driver): "Publish notifications via Python logging infrastructure." 
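# (Illustrative aside, not part of the original module.) Each notification
# is emitted on a logger named after the event type, so it can be routed
# with ordinary logging configuration; e.g. a 'compute.create_instance'
# event is logged via:
#
#     logging.getLogger('oslo.messaging.notification.compute.create_instance')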
LOGGER_BASE = 'oslo.messaging.notification' def notify(self, ctxt, message, priority): logger = logging.getLogger('%s.%s' % (self.LOGGER_BASE, message['event_type'])) method = getattr(logger, priority.lower(), None) if method: method(jsonutils.dumps(message)) oslo.messaging-1.3.0/oslo/messaging/notify/_impl_routing.py0000664000175300017540000001204512316527457025300 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import logging from oslo.config import cfg import six from stevedore import dispatch import yaml from oslo.messaging.notify import notifier from oslo.messaging.openstack.common.gettextutils import _ # noqa LOG = logging.getLogger(__name__) router_config = cfg.StrOpt('routing_notifier_config', default='', help='RoutingNotifier configuration file location.') CONF = cfg.CONF CONF.register_opt(router_config) class RoutingDriver(notifier._Driver): NOTIFIER_PLUGIN_NAMESPACE = 'oslo.messaging.notify.drivers' plugin_manager = None routing_groups = None # The routing groups from the config file. used_drivers = None # Used driver names, extracted from config file. def _should_load_plugin(self, ext, *args, **kwargs): # Hack to keep stevedore from circular importing since these # endpoints are used for different purposes. if ext.name == 'routing': return False return ext.name in self.used_drivers def _get_notifier_config_file(self, filename): """Broken out for testing.""" return file(filename, 'r') def _load_notifiers(self): """One-time load of notifier config file.""" self.routing_groups = {} self.used_drivers = set() filename = CONF.routing_notifier_config if not filename: return # Infer which drivers are used from the config file. self.routing_groups = yaml.load( self._get_notifier_config_file(filename)) if not self.routing_groups: self.routing_groups = {} # In case we got None from load() return for group in self.routing_groups.values(): self.used_drivers.update(group.keys()) LOG.debug(_('loading notifiers from %(namespace)s') % {'namespace': self.NOTIFIER_PLUGIN_NAMESPACE}) self.plugin_manager = dispatch.DispatchExtensionManager( namespace=self.NOTIFIER_PLUGIN_NAMESPACE, check_func=self._should_load_plugin, invoke_on_load=True, invoke_args=None) if not list(self.plugin_manager): LOG.warning(_("Failed to load any notifiers " "for %(namespace)s") % {'namespace': self.NOTIFIER_PLUGIN_NAMESPACE}) def _get_drivers_for_message(self, group, event_type, priority): """Which drivers should be called for this event_type or priority. 
""" accepted_drivers = set() for driver, rules in six.iteritems(group): checks = [] for key, patterns in six.iteritems(rules): if key == 'accepted_events': c = [fnmatch.fnmatch(event_type, p) for p in patterns] checks.append(any(c)) if key == 'accepted_priorities': c = [fnmatch.fnmatch(priority, p.lower()) for p in patterns] checks.append(any(c)) if all(checks): accepted_drivers.add(driver) return list(accepted_drivers) def _filter_func(self, ext, context, message, accepted_drivers): """True/False if the driver should be called for this message. """ # context is unused here, but passed in by map() return ext.name in accepted_drivers def _call_notify(self, ext, context, message, accepted_drivers): """Emit the notification. """ # accepted_drivers is passed in as a result of the map() function LOG.info(_("Routing '%(event)s' notification to '%(driver)s' driver") % {'event': message.get('event_type'), 'driver': ext.name}) ext.obj.notify(context, message) def notify(self, context, message): if not self.plugin_manager: self._load_notifiers() # Fail if these aren't present ... event_type = message['event_type'] priority = message['priority'].lower() accepted_drivers = set() for group in self.routing_groups.values(): accepted_drivers.update(self._get_drivers_for_message(group, event_type, priority)) self.plugin_manager.map(self._filter_func, self._call_notify, context, message, list(accepted_drivers)) oslo.messaging-1.3.0/oslo/messaging/notify/log_handler.py0000664000175300017540000000322112316527457024703 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo.config import cfg class PublishErrorsHandler(logging.Handler): def __init__(self, *args, **kwargs): # NOTE(dhellmann): Avoid a cyclical import by doing this one # at runtime. from oslo import messaging logging.Handler.__init__(self, *args, **kwargs) self._transport = messaging.get_transport(cfg.CONF) self._notifier = messaging.Notifier(self._transport, publisher_id='error.publisher') def emit(self, record): # NOTE(bnemec): Notifier registers this opt with the transport. if ('log' in self._transport.conf.notification_driver): # NOTE(lbragstad): If we detect that log is one of the # notification drivers, then return. This protects from infinite # recursion where something bad happens, it gets logged, the log # handler sends a notification, and the log_notifier sees the # notification and logs it. return self._notifier.error(None, 'error_notification', dict(error=record.msg)) oslo.messaging-1.3.0/oslo/messaging/notify/notifier.py0000664000175300017540000002322712316527457024254 0ustar jenkinsjenkins00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import logging import uuid from oslo.config import cfg import six from stevedore import named from oslo.messaging.openstack.common import timeutils from oslo.messaging import serializer as msg_serializer _notifier_opts = [ cfg.MultiStrOpt('notification_driver', default=[], help='Driver or drivers to handle sending notifications.'), cfg.ListOpt('notification_topics', default=['notifications', ], deprecated_name='topics', deprecated_group='rpc_notifier2', help='AMQP topic used for OpenStack notifications.'), ] _LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Driver(object): def __init__(self, conf, topics, transport): self.conf = conf self.topics = topics self.transport = transport @abc.abstractmethod def notify(self, ctxt, msg, priority): pass class Notifier(object): """Send notification messages. The Notifier class is used for sending notification messages over a messaging transport or other means. Notification messages have the following format:: {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', 'timestamp': timeutils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} A Notifier object can be instantiated with a transport object and a publisher ID:: notifier = notifier.Notifier(get_transport(CONF), 'compute') and notifications are sent via drivers chosen with the notification_driver config option and on the topics chosen with the notification_topics config option. Alternatively, a Notifier object can be instantiated with a specific driver or topic:: notifier = notifier.Notifier(RPC_TRANSPORT, 'compute.host', driver='messaging', topic='notifications') Notifier objects are relatively expensive to instantiate (mostly the cost of loading notification drivers), so it is possible to specialize a given Notifier object with a different publisher id using the prepare() method:: notifier = notifier.prepare(publisher_id='compute') notifier.info(ctxt, event_type, payload) """ def __init__(self, transport, publisher_id=None, driver=None, topic=None, serializer=None): """Construct a Notifier object. :param transport: the transport to use for sending messages :type transport: oslo.messaging.Transport :param publisher_id: field in notifications sent, e.g.
'compute.host1' :type publisher_id: str :param driver: a driver to lookup from oslo.messaging.notify.drivers :type driver: str :param topic: the topic which to send messages on :type topic: str :param serializer: an optional entity serializer :type serializer: Serializer """ transport.conf.register_opts(_notifier_opts) self.transport = transport self.publisher_id = publisher_id self._driver_names = ([driver] if driver is not None else transport.conf.notification_driver) self._topics = ([topic] if topic is not None else transport.conf.notification_topics) self._serializer = serializer or msg_serializer.NoOpSerializer() self._driver_mgr = named.NamedExtensionManager( 'oslo.messaging.notify.drivers', names=self._driver_names, invoke_on_load=True, invoke_args=[transport.conf], invoke_kwds={ 'topics': self._topics, 'transport': self.transport, }, ) _marker = object() def prepare(self, publisher_id=_marker): """Return a specialized Notifier instance. Returns a new Notifier instance with the supplied publisher_id. Allows sending notifications from multiple publisher_ids without the overhead of notification driver loading. :param publisher_id: field in notifications sent, e.g. 'compute.host1' :type publisher_id: str """ return _SubNotifier._prepare(self, publisher_id) def _notify(self, ctxt, event_type, payload, priority, publisher_id=None): payload = self._serializer.serialize_entity(ctxt, payload) ctxt = self._serializer.serialize_context(ctxt) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id or self.publisher_id, event_type=event_type, priority=priority, payload=payload, timestamp=str(timeutils.utcnow())) def do_notify(ext): try: ext.obj.notify(ctxt, msg, priority) except Exception as e: _LOG.exception("Problem '%(e)s' attempting to send to " "notification system. Payload=%(payload)s", dict(e=e, payload=payload)) if self._driver_mgr.extensions: self._driver_mgr.map(do_notify) def audit(self, ctxt, event_type, payload): """Send a notification at audit level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'AUDIT') def debug(self, ctxt, event_type, payload): """Send a notification at debug level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'DEBUG') def info(self, ctxt, event_type, payload): """Send a notification at info level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'INFO') def warn(self, ctxt, event_type, payload): """Send a notification at warning level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'WARN') warning = warn def error(self, ctxt, event_type, payload): """Send a notification at error level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 
'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'ERROR') def critical(self, ctxt, event_type, payload): """Send a notification at critical level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'CRITICAL') def sample(self, ctxt, event_type, payload): """Send a notification at sample level. Sample notifications are for high-frequency events that typically contain small payloads, e.g. "CPU = 70%". Not all drivers support the sample level (log, for example) so these could be dropped. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, e.g. 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict """ self._notify(ctxt, event_type, payload, 'SAMPLE') class _SubNotifier(Notifier): _marker = Notifier._marker def __init__(self, base, publisher_id): self._base = base self.transport = base.transport self.publisher_id = publisher_id self._serializer = self._base._serializer self._driver_mgr = self._base._driver_mgr def _notify(self, ctxt, event_type, payload, priority): super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority) @classmethod def _prepare(cls, base, publisher_id=_marker): if publisher_id is cls._marker: publisher_id = base.publisher_id return cls(base, publisher_id) oslo.messaging-1.3.0/oslo/messaging/notify/logger.py0000664000175300017540000000530212316527457023706 0ustar jenkinsjenkins00000000000000# Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for the Python logging package that sends log records as a notification. """ import logging from oslo.config import cfg from oslo.messaging.notify import notifier from oslo.messaging import transport class LoggingNotificationHandler(logging.Handler): """Handler for logging to the messaging notification system. Each time the application logs a message using the :py:mod:`logging` module, it will be sent as a notification. The severity used for the notification will be the same as the one used for the log record. This can be used in a Python logging configuration like this:: [handler_notifier] class=oslo.messaging.LoggingNotificationHandler level=ERROR args=('qpid:///') """ CONF = cfg.CONF """Default configuration object. Subclass this class if you want to use another one. """ def __init__(self, url, publisher_id=None, driver=None, topic=None, serializer=None): self.notifier = notifier.Notifier( transport.get_transport(self.CONF, url), publisher_id, driver, topic, serializer() if serializer else None) logging.Handler.__init__(self) def emit(self, record): """Emit the log record to the messaging notification system. :param record: A log record to emit.
""" method = getattr(self.notifier, record.levelname.lower(), None) if not method: return method(None, 'logrecord', { 'name': record.name, 'levelno': record.levelno, 'levelname': record.levelname, 'exc_info': record.exc_info, 'pathname': record.pathname, 'lineno': record.lineno, 'msg': record.getMessage(), 'funcName': record.funcName, 'thread': record.thread, 'processName': record.processName, 'process': record.process, 'extra': getattr(record, 'extra', None), }) oslo.messaging-1.3.0/oslo/messaging/notify/__init__.py0000664000175300017540000000163012316527457024166 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['Notifier', 'LoggingNotificationHandler', 'get_notification_listener', 'NotificationResult', 'PublishErrorsHandler'] from .notifier import * from .listener import * from .log_handler import * from .logger import * from .dispatcher import NotificationResult oslo.messaging-1.3.0/oslo/messaging/notify/_impl_test.py0000664000175300017540000000203112316527457024562 0ustar jenkinsjenkins00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.messaging.notify import notifier NOTIFICATIONS = [] def reset(): "Clear out the list of recorded notifications." global NOTIFICATIONS NOTIFICATIONS = [] class TestDriver(notifier._Driver): "Store notifications in memory for test verification." def notify(self, ctxt, message, priority): NOTIFICATIONS.append((ctxt, message, priority)) oslo.messaging-1.3.0/oslo/messaging/notify/listener.py0000664000175300017540000001214712316527457024261 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A notification listener exposes a number of endpoints, each of which contain a set of methods. Each method corresponds to a notification priority. 
To create a notification listener, you supply a transport, list of targets and a list of endpoints. A transport can be obtained simply by calling the get_transport() method:: transport = messaging.get_transport(conf) which will load the appropriate transport driver according to the user's messaging configuration. See get_transport() for more details. The target supplied when creating a notification listener expresses the topic and - optionally - the exchange to listen on. See Target for more details on these attributes. Notification listeners have start(), stop() and wait() methods to begin handling requests, stop handling requests and wait for all in-process requests to complete. Each notification listener is associated with an executor which integrates the listener with a specific I/O handling framework. Currently, there are blocking and eventlet executors available. A simple example of a notification listener with multiple endpoints might be:: from oslo.config import cfg from oslo import messaging class NotificationEndpoint(object): def warn(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) class ErrorEndpoint(object): def error(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) transport = messaging.get_transport(cfg.CONF) targets = [ messaging.Target(topic='notifications'), messaging.Target(topic='notifications_bis') ] endpoints = [ NotificationEndpoint(), ErrorEndpoint(), ] server = messaging.get_notification_listener(transport, targets, endpoints) server.start() server.wait() A notifier sends a notification on a topic with a priority; the notification listener will receive that notification if the notification's topic is set in one of the listener's targets and if an endpoint implements a method named after the priority. Parameters to endpoint methods are the request context supplied by the client, the publisher_id of the notification message, the event_type, the payload and metadata. The metadata parameter is a mapping containing a unique message_id and a timestamp. By supplying a serializer object, a listener can deserialize a request context and arguments from - and serialize return values to - primitive types. An endpoint method can explicitly return messaging.NotificationResult.HANDLED to acknowledge a message or messaging.NotificationResult.REQUEUE to requeue the message. The message is acknowledged only if all endpoints either return messaging.NotificationResult.HANDLED or None. Note that not all transport drivers implement support for requeueing. In order to use this feature, applications should assert that the feature is available by passing allow_requeue=True to get_notification_listener(). If the driver does not support requeueing, it will raise NotImplementedError at this point. """ from oslo.messaging.notify import dispatcher as notify_dispatcher from oslo.messaging import server as msg_server def get_notification_listener(transport, targets, endpoints, executor='blocking', serializer=None, allow_requeue=False): """Construct a notification listener. The executor parameter controls how incoming messages will be received and dispatched. By default, the most simple executor is used - the blocking executor. :param transport: the messaging transport :type transport: Transport :param targets: the exchanges and topics to listen on :type targets: list of Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of a message executor - e.g.
'eventlet', 'blocking' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param allow_requeue: whether NotificationResult.REQUEUE support is needed :type allow_requeue: bool :raises: NotImplementedError """ transport._require_driver_features(requeue=allow_requeue) dispatcher = notify_dispatcher.NotificationDispatcher(targets, endpoints, serializer, allow_requeue) return msg_server.MessageHandlingServer(transport, dispatcher, executor) oslo.messaging-1.3.0/oslo/messaging/notify/dispatcher.py0000664000175300017540000001077212316527457024564 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import itertools import logging import sys from oslo.messaging import localcontext from oslo.messaging import serializer as msg_serializer LOG = logging.getLogger(__name__) PRIORITIES = ['audit', 'debug', 'info', 'warn', 'error', 'critical', 'sample'] class NotificationResult(object): HANDLED = 'handled' REQUEUE = 'requeue' class NotificationDispatcher(object): """A message dispatcher which understands Notification messages. A MessageHandlingServer is constructed by passing a callable dispatcher which is invoked with context and message dictionaries each time a message is received. NotificationDispatcher is one such dispatcher, which passes raw notification messages to the endpoints. """ def __init__(self, targets, endpoints, serializer, allow_requeue): self.targets = targets self.endpoints = endpoints self.serializer = serializer or msg_serializer.NoOpSerializer() self.allow_requeue = allow_requeue self._callbacks_by_priority = {} for endpoint, prio in itertools.product(endpoints, PRIORITIES): if hasattr(endpoint, prio): method = getattr(endpoint, prio) self._callbacks_by_priority.setdefault(prio, []).append(method) priorities = self._callbacks_by_priority.keys() self._targets_priorities = set(itertools.product(self.targets, priorities)) def _listen(self, transport): return transport._listen_for_notifications(self._targets_priorities) @contextlib.contextmanager def __call__(self, incoming): result_wrapper = [] yield lambda: result_wrapper.append( self._dispatch_and_handle_error(incoming)) if result_wrapper[0] == NotificationResult.HANDLED: incoming.acknowledge() else: incoming.requeue() def _dispatch_and_handle_error(self, incoming): """Dispatch a notification message to the appropriate endpoint method. :param incoming: the incoming notification message :type incoming: IncomingMessage """ try: return self._dispatch(incoming.ctxt, incoming.message) except Exception: # sys.exc_info() is deleted by LOG.exception(). exc_info = sys.exc_info() LOG.error('Exception during message handling', exc_info=exc_info) return NotificationResult.HANDLED def _dispatch(self, ctxt, message): """Dispatch a notification message to the appropriate endpoint method.
:param ctxt: the request context :type ctxt: dict :param message: the message payload :type message: dict """ ctxt = self.serializer.deserialize_context(ctxt) publisher_id = message.get('publisher_id') event_type = message.get('event_type') metadata = { 'message_id': message.get('message_id'), 'timestamp': message.get('timestamp') } priority = message.get('priority', '').lower() if priority not in PRIORITIES: LOG.warning('Unknown priority "%s"' % priority) return payload = self.serializer.deserialize_entity(ctxt, message.get('payload')) for callback in self._callbacks_by_priority.get(priority, []): localcontext.set_local_context(ctxt) try: ret = callback(ctxt, publisher_id, event_type, payload, metadata) ret = NotificationResult.HANDLED if ret is None else ret if self.allow_requeue and ret == NotificationResult.REQUEUE: return ret finally: localcontext.clear_local_context() return NotificationResult.HANDLED oslo.messaging-1.3.0/oslo/messaging/rpc/0000775000175300017540000000000012316527535021326 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/rpc/server.py0000664000175300017540000001301412316527457023210 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ An RPC server exposes a number of endpoints, each of which contains a set of methods which may be invoked remotely by clients over a given transport. To create an RPC server, you supply a transport, target and a list of endpoints. A transport can be obtained simply by calling the get_transport() method:: transport = messaging.get_transport(conf) which will load the appropriate transport driver according to the user's messaging configuration. See get_transport() for more details. The target supplied when creating an RPC server expresses the topic, server name and - optionally - the exchange to listen on. See Target for more details on these attributes. Each endpoint object may have a target attribute which may have namespace and version fields set. By default, we use the 'null namespace' and version 1.0. Incoming method calls will be dispatched to the first endpoint with the requested method, a matching namespace and a compatible version number. RPC servers have start(), stop() and wait() methods to begin handling requests, stop handling requests and wait for all in-process requests to complete. An RPC server class is provided for each supported I/O handling framework. Currently BlockingRPCServer and eventlet.RPCServer are available.
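A client built around RPCClient can then drive such a server - an
illustrative sketch, not part of the original docs, whose topic and method
mirror the example below::

    client = messaging.RPCClient(transport, messaging.Target(topic='test'))
    result = client.call(ctxt, 'test', arg='hello')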
A simple example of an RPC server with multiple endpoints might be:: from oslo.config import cfg from oslo import messaging class ServerControlEndpoint(object): target = messaging.Target(namespace='control', version='2.0') def __init__(self, server): self.server = server def stop(self, ctx): self.server.stop() class TestEndpoint(object): def test(self, ctx, arg): return arg transport = messaging.get_transport(cfg.CONF) target = messaging.Target(topic='test', server='server1') endpoints = [ ServerControlEndpoint(None), TestEndpoint(), ] server = messaging.get_rpc_server(transport, target, endpoints) server.start() server.wait() Clients can invoke methods on the server by sending the request to a topic and it gets sent to one of the servers listening on the topic, or by sending the request to a specific server listening on the topic, or by sending the request to all servers listening on the topic (known as fanout). These modes are chosen via the server and fanout attributes on Target but the mode used is transparent to the server. The first parameter to method invocations is always the request context supplied by the client. Parameters to the method invocation must be primitive types, and so must the return values from the methods. By supplying a serializer object, a server can deserialize a request context and arguments from - and serialize return values to - primitive types. """ __all__ = [ 'get_rpc_server', 'expected_exceptions', ] from oslo.messaging.rpc import dispatcher as rpc_dispatcher from oslo.messaging import server as msg_server def get_rpc_server(transport, target, endpoints, executor='blocking', serializer=None): """Construct an RPC server. The executor parameter controls how incoming messages will be received and dispatched. By default, the most simple executor is used - the blocking executor. :param transport: the messaging transport :type transport: Transport :param target: the exchange, topic and server to listen on :type target: Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of a message executor - e.g. 'eventlet', 'blocking' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer """ dispatcher = rpc_dispatcher.RPCDispatcher(target, endpoints, serializer) return msg_server.MessageHandlingServer(transport, dispatcher, executor) def expected_exceptions(*exceptions): """Decorator for RPC endpoint methods that raise expected exceptions. Marking an endpoint method with this decorator allows the declaration of expected exceptions that the RPC server should not consider fatal, and not log as if they were generated in a real error scenario. Note that this will cause listed exceptions to be wrapped in an ExpectedException, which is used internally by the RPC server. The RPC client will see the original exception type. """ def outer(func): def inner(*args, **kwargs): try: return func(*args, **kwargs) # Take advantage of the fact that we can catch # multiple exception types using a tuple of # exception classes, with subclass detection # for free. Any exception that is not in or # derived from the args passed to us will be # ignored and thrown as normal. except exceptions: raise rpc_dispatcher.ExpectedException() return inner return outer oslo.messaging-1.3.0/oslo/messaging/rpc/__init__.py0000664000175300017540000000166012316527457023445 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc.
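# (Illustrative usage sketch for the expected_exceptions decorator defined
# in server.py above; the endpoint class and exception are hypothetical.)
#
#     from oslo import messaging
#
#     class MathEndpoint(object):
#         @messaging.expected_exceptions(ValueError)
#         def divide(self, ctxt, a, b):
#             if b == 0:
#                 raise ValueError('cannot divide by zero')
#             return a / b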
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'ClientSendError', 'ExpectedException', 'NoSuchMethod', 'RPCClient', 'RPCDispatcher', 'RPCDispatcherError', 'RPCVersionCapError', 'RemoteError', 'UnsupportedVersion', 'expected_exceptions', 'get_rpc_server', ] from .client import * from .dispatcher import * from .server import * oslo.messaging-1.3.0/oslo/messaging/rpc/client.py0000664000175300017540000003340312316527457023164 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'ClientSendError', 'RPCClient', 'RPCVersionCapError', 'RemoteError', ] from oslo.config import cfg import six from oslo.messaging._drivers import base as driver_base from oslo.messaging import _utils as utils from oslo.messaging import exceptions from oslo.messaging import serializer as msg_serializer _client_opts = [ cfg.IntOpt('rpc_response_timeout', default=60, help='Seconds to wait for a response from a call.'), ] class RemoteError(exceptions.MessagingException): """Signifies that a remote endpoint method has raised an exception. Contains a string representation of the type of the original exception, the value of the original exception, and the traceback. These are sent to the parent as a joined string so printing the exception contains all of the relevant info. """ def __init__(self, exc_type=None, value=None, traceback=None): self.exc_type = exc_type self.value = value self.traceback = traceback msg = ("Remote error: %(exc_type)s %(value)s\n%(traceback)s." % dict(exc_type=self.exc_type, value=self.value, traceback=self.traceback)) super(RemoteError, self).__init__(msg) class RPCVersionCapError(exceptions.MessagingException): def __init__(self, version, version_cap): self.version = version self.version_cap = version_cap msg = ("Specified RPC version cap, %(version_cap)s, is too low. " "Needs to be higher than %(version)s." 
% dict(version=self.version, version_cap=self.version_cap)) super(RPCVersionCapError, self).__init__(msg) class ClientSendError(exceptions.MessagingException): """Raised if we failed to send a message to a target.""" def __init__(self, target, ex): msg = 'Failed to send to target "%s": %s' % (target, ex) super(ClientSendError, self).__init__(msg) self.target = target self.ex = ex class _CallContext(object): _marker = object() def __init__(self, transport, target, serializer, timeout=None, version_cap=None): self.conf = transport.conf self.transport = transport self.target = target self.serializer = serializer self.timeout = timeout self.version_cap = version_cap super(_CallContext, self).__init__() def _make_message(self, ctxt, method, args): msg = dict(method=method) msg['args'] = dict() for argname, arg in six.iteritems(args): msg['args'][argname] = self.serializer.serialize_entity(ctxt, arg) if self.target.namespace is not None: msg['namespace'] = self.target.namespace if self.target.version is not None: msg['version'] = self.target.version return msg def _check_version_cap(self, version): if not utils.version_is_compatible(self.version_cap, version): raise RPCVersionCapError(version=version, version_cap=self.version_cap) def can_send_version(self, version=_marker): """Check to see if a version is compatible with the version cap.""" version = self.target.version if version is self._marker else version # Compare against the resolved version, not self.target.version, # so that an explicitly requested version is actually checked. return (not self.version_cap or utils.version_is_compatible(self.version_cap, version)) def cast(self, ctxt, method, **kwargs): """Invoke a method and return immediately. See RPCClient.cast().""" msg = self._make_message(ctxt, method, kwargs) ctxt = self.serializer.serialize_context(ctxt) if self.version_cap: self._check_version_cap(msg.get('version')) try: self.transport._send(self.target, ctxt, msg) except driver_base.TransportDriverError as ex: raise ClientSendError(self.target, ex) def call(self, ctxt, method, **kwargs): """Invoke a method and wait for a reply. See RPCClient.call().""" msg = self._make_message(ctxt, method, kwargs) msg_ctxt = self.serializer.serialize_context(ctxt) timeout = self.timeout if self.timeout is None: timeout = self.conf.rpc_response_timeout if self.version_cap: self._check_version_cap(msg.get('version')) try: result = self.transport._send(self.target, msg_ctxt, msg, wait_for_reply=True, timeout=timeout) except driver_base.TransportDriverError as ex: raise ClientSendError(self.target, ex) return self.serializer.deserialize_entity(ctxt, result) @classmethod def _prepare(cls, base, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker): """Prepare a method invocation context. See RPCClient.prepare().""" kwargs = dict( exchange=exchange, topic=topic, namespace=namespace, version=version, server=server, fanout=fanout) kwargs = dict([(k, v) for k, v in kwargs.items() if v is not cls._marker]) target = base.target(**kwargs) if timeout is cls._marker: timeout = base.timeout if version_cap is cls._marker: version_cap = base.version_cap return _CallContext(base.transport, target, base.serializer, timeout, version_cap) def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker): """Prepare a method invocation context.
See RPCClient.prepare().""" return self._prepare(self, exchange, topic, namespace, version, server, fanout, timeout, version_cap) class RPCClient(object): """A class for invoking methods on remote servers. The RPCClient class is responsible for sending method invocations to remote servers via a messaging transport. A default target is supplied to the RPCClient constructor, but target attributes can be overridden for individual method invocations using the prepare() method. A method invocation consists of a request context dictionary, a method name and a dictionary of arguments. A cast() invocation just sends the request and returns immediately. A call() invocation waits for the server to send a return value. This class is intended to be used by wrapping it in another class which provides methods on the subclass to perform the remote invocation using call() or cast():: class TestClient(object): def __init__(self, transport): target = messaging.Target(topic='testtopic', version='2.0') self._client = messaging.RPCClient(transport, target) def test(self, ctxt, arg): return self._client.call(ctxt, 'test', arg=arg) An example of using the prepare() method to override some attributes of the default target:: def test(self, ctxt, arg): cctxt = self._client.prepare(version='2.5') return cctxt.call(ctxt, 'test', arg=arg) RPCClient have a number of other properties - for example, timeout and version_cap - which may make sense to override for some method invocations, so they too can be passed to prepare():: def test(self, ctxt, arg): cctxt = self._client.prepare(timeout=10) return cctxt.call(ctxt, 'test', arg=arg) However, this class can be used directly without wrapping it another class. For example:: transport = messaging.get_transport(cfg.CONF) target = messaging.Target(topic='testtopic', version='2.0') client = messaging.RPCClient(transport, target) client.call(ctxt, 'test', arg=arg) but this is probably only useful in limited circumstances as a wrapper class will usually help to make the code much more obvious. """ def __init__(self, transport, target, timeout=None, version_cap=None, serializer=None): """Construct an RPC client. :param transport: a messaging transport handle :type transport: Transport :param target: the default target for invocations :type target: Target :param timeout: an optional default timeout (in seconds) for call()s :type timeout: int or float :param version_cap: raise a RPCVersionCapError version exceeds this cap :type version_cap: str :param serializer: an optional entity serializer :type serializer: Serializer """ self.conf = transport.conf self.conf.register_opts(_client_opts) self.transport = transport self.target = target self.timeout = timeout self.version_cap = version_cap self.serializer = serializer or msg_serializer.NoOpSerializer() super(RPCClient, self).__init__() _marker = _CallContext._marker def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker): """Prepare a method invocation context. Use this method to override client properties for an individual method invocation. 
For example:: def test(self, ctxt, arg): cctxt = self.prepare(version='2.5') return cctxt.call(ctxt, 'test', arg=arg) :param exchange: see Target.exchange :type exchange: str :param topic: see Target.topic :type topic: str :param namespace: see Target.namespace :type namespace: str :param version: requirement the server must support, see Target.version :type version: str :param server: send to a specific server, see Target.server :type server: str :param fanout: send to all servers on topic, see Target.fanout :type fanout: bool :param timeout: an optional default timeout (in seconds) for call()s :type timeout: int or float :param version_cap: raise a RPCVersionCapError if the requested message version exceeds this cap :type version_cap: str """ return _CallContext._prepare(self, exchange, topic, namespace, version, server, fanout, timeout, version_cap) def cast(self, ctxt, method, **kwargs): """Invoke a method and return immediately. Method arguments must either be primitive types or types supported by the client's serializer (if any). Similarly, the request context must be a dict unless the client's serializer supports serializing another type. :param ctxt: a request context dict :type ctxt: dict :param method: the method name :type method: str :param kwargs: a dict of method arguments :type kwargs: dict """ self.prepare().cast(ctxt, method, **kwargs) def call(self, ctxt, method, **kwargs): """Invoke a method and wait for a reply. Method arguments must either be primitive types or types supported by the client's serializer (if any). Similarly, the request context must be a dict unless the client's serializer supports serializing another type. The semantics of how any errors raised by the remote RPC endpoint method are handled are quite subtle. Firstly, if the remote exception is contained in one of the modules listed in the allow_remote_exmods messaging.get_transport() parameter, then this exception will be re-raised by call(). However, such locally re-raised remote exceptions are distinguishable from the same exception type raised locally because re-raised remote exceptions are modified such that their class name ends with the '_Remote' suffix so you may do:: if ex.__class__.__name__.endswith('_Remote'): # Some special case for locally re-raised remote exceptions Secondly, if a remote exception is not from a module listed in the allowed_remote_exmods list, then a messaging.RemoteError exception is raised with all details of the remote exception. :param ctxt: a request context dict :type ctxt: dict :param method: the method name :type method: str :param kwargs: a dict of method arguments :type kwargs: dict :raises: MessagingTimeout, RemoteError """ return self.prepare().call(ctxt, method, **kwargs) def can_send_version(self, version=_marker): """Check to see if a version is compatible with the version cap.""" return self.prepare(version=version).can_send_version() oslo.messaging-1.3.0/oslo/messaging/rpc/dispatcher.py0000664000175300017540000001454212316527457024037 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
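# (Illustrative aside for _CallContext.can_send_version above; the method
# names and version numbers are hypothetical.)
#
#     def resize(self, ctxt, instance_id, flavor):
#         if self._client.can_send_version('2.1'):
#             cctxt = self._client.prepare(version='2.1')
#             return cctxt.call(ctxt, 'resize_v2',
#                               instance_id=instance_id, flavor=flavor)
#         return self._client.call(ctxt, 'resize', instance_id=instance_id)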
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'NoSuchMethod', 'RPCDispatcher', 'RPCDispatcherError', 'UnsupportedVersion', 'ExpectedException', ] import contextlib import logging import sys import six from oslo.messaging import _utils as utils from oslo.messaging import localcontext from oslo.messaging import serializer as msg_serializer from oslo.messaging import server as msg_server from oslo.messaging import target as msg_target LOG = logging.getLogger(__name__) class ExpectedException(Exception): """Encapsulates an expected exception raised by an RPC endpoint Merely instantiating this exception records the current exception information, which will be passed back to the RPC client without exceptional logging. """ def __init__(self): self.exc_info = sys.exc_info() class RPCDispatcherError(msg_server.MessagingServerError): "A base class for all RPC dispatcher exceptions." class NoSuchMethod(RPCDispatcherError, AttributeError): "Raised if there is no endpoint which exposes the requested method." def __init__(self, method): msg = "Endpoint does not support RPC method %s" % method super(NoSuchMethod, self).__init__(msg) self.method = method class UnsupportedVersion(RPCDispatcherError): "Raised if there is no endpoint which supports the requested version." def __init__(self, version): msg = "Endpoint does not support RPC version %s" % version super(UnsupportedVersion, self).__init__(msg) self.version = version class RPCDispatcher(object): """A message dispatcher which understands RPC messages. A MessageHandlingServer is constructed by passing a callable dispatcher which is invoked with context and message dictionaries each time a message is received. RPCDispatcher is one such dispatcher which understands the format of RPC messages. The dispatcher looks at the namespace, version and method values in the message and matches those against a list of available endpoints. Endpoints may have a target attribute describing the namespace and version of the methods exposed by that object. All public methods on an endpoint object are remotely invokable by clients. """ def __init__(self, target, endpoints, serializer): """Construct a rpc server dispatcher. 
:param target: the exchange, topic and server to listen on :type target: Target """ self.endpoints = endpoints self.serializer = serializer or msg_serializer.NoOpSerializer() self._default_target = msg_target.Target() self._target = target def _listen(self, transport): return transport._listen(self._target) @staticmethod def _is_namespace(target, namespace): return namespace == target.namespace @staticmethod def _is_compatible(target, version): endpoint_version = target.version or '1.0' return utils.version_is_compatible(endpoint_version, version) def _do_dispatch(self, endpoint, method, ctxt, args): ctxt = self.serializer.deserialize_context(ctxt) new_args = dict() for argname, arg in six.iteritems(args): new_args[argname] = self.serializer.deserialize_entity(ctxt, arg) result = getattr(endpoint, method)(ctxt, **new_args) return self.serializer.serialize_entity(ctxt, result) @contextlib.contextmanager def __call__(self, incoming): incoming.acknowledge() yield lambda: self._dispatch_and_reply(incoming) def _dispatch_and_reply(self, incoming): try: incoming.reply(self._dispatch(incoming.ctxt, incoming.message)) except ExpectedException as e: LOG.debug('Expected exception during message handling (%s)' % e.exc_info[1]) incoming.reply(failure=e.exc_info, log_failure=False) except Exception as e: # sys.exc_info() is deleted by LOG.exception(). exc_info = sys.exc_info() LOG.error('Exception during message handling: %s', e, exc_info=exc_info) incoming.reply(failure=exc_info) # NOTE(dhellmann): Remove circular object reference # between the current stack frame and the traceback in # exc_info. del exc_info def _dispatch(self, ctxt, message): """Dispatch an RPC message to the appropriate endpoint method. :param ctxt: the request context :type ctxt: dict :param message: the message payload :type message: dict :raises: NoSuchMethod, UnsupportedVersion """ method = message.get('method') args = message.get('args', {}) namespace = message.get('namespace') version = message.get('version', '1.0') found_compatible = False for endpoint in self.endpoints: target = getattr(endpoint, 'target', None) if not target: target = self._default_target if not (self._is_namespace(target, namespace) and self._is_compatible(target, version)): continue if hasattr(endpoint, method): localcontext.set_local_context(ctxt) try: return self._do_dispatch(endpoint, method, ctxt, args) finally: localcontext.clear_local_context() found_compatible = True if found_compatible: raise NoSuchMethod(method) else: raise UnsupportedVersion(version) oslo.messaging-1.3.0/oslo/messaging/localcontext.py0000664000175300017540000000320212316527457023613 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'get_local_context', 'set_local_context', 'clear_local_context', ] import threading import uuid _KEY = '_%s_%s' % (__name__.replace('.', '_'), uuid.uuid4().hex) _STORE = threading.local() def get_local_context(ctxt): """Retrieve the RPC endpoint request context for the current thread. 
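For example, the RPC dispatcher brackets each endpoint invocation with the set/clear pair so that code called from the endpoint can look the context up (an illustrative sketch; endpoint_method and args stand in for the dispatched call)::

    set_local_context(ctxt)
    try:
        result = endpoint_method(ctxt, **args)  # helpers invoked here may
                                                # consult the local context
    finally:
        clear_local_context()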
This method allows any code running in the context of a dispatched RPC endpoint method to retrieve the context for this request. This is commonly used for logging so that, for example, you can include the request ID, user and tenant in every message logged from a RPC endpoint method. :returns: the context for the request dispatched in the current thread """ return getattr(_STORE, _KEY, None) def set_local_context(ctxt): """Set the request context for the current thread. :param ctxt: a deserialized request context :type ctxt: dict """ setattr(_STORE, _KEY, ctxt) def clear_local_context(): """Clear the request context for the current thread.""" delattr(_STORE, _KEY) oslo.messaging-1.3.0/oslo/messaging/exceptions.py0000664000175300017540000000214212316527457023277 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['MessagingException', 'MessagingTimeout', 'InvalidTarget'] class MessagingException(Exception): """Base class for exceptions.""" class MessagingTimeout(MessagingException): """Raised if message sending times out.""" class InvalidTarget(MessagingException, ValueError): """Raised if a target does not meet certain pre-conditions.""" def __init__(self, msg, target): msg = msg + ":" + str(target) super(InvalidTarget, self).__init__(msg) self.target = target oslo.messaging-1.3.0/oslo/messaging/_cmd/0000775000175300017540000000000012316527535021444 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo/messaging/_cmd/zmq_receiver.py0000775000175300017540000000225312316527457024521 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet eventlet.monkey_patch() import contextlib import logging import sys from oslo.config import cfg from oslo.messaging._drivers import impl_zmq from oslo.messaging._executors import impl_eventlet # FIXME(markmc) CONF = cfg.CONF CONF.register_opts(impl_zmq.zmq_opts) CONF.register_opts(impl_eventlet._eventlet_opts) def main(): CONF(sys.argv[1:], project='oslo') logging.basicConfig(level=logging.DEBUG) with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: reactor.consume_in_thread() reactor.wait() oslo.messaging-1.3.0/oslo/messaging/_cmd/__init__.py0000664000175300017540000000000112316527457023547 0ustar jenkinsjenkins00000000000000 oslo.messaging-1.3.0/oslo/messaging/_utils.py0000664000175300017540000000260612316527457022422 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. 
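# NOTE: editorial sketch of the semantics of version_is_compatible() below;
# not part of the original module:
#
#     version_is_compatible('1.3', '1.0')    # True:  requested minor is lower
#     version_is_compatible('1.3', '1.3.1')  # False: requested revision higher
#     version_is_compatible('2.0', '1.9')    # False: major versions differ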
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def version_is_compatible(imp_version, version): """Determine whether versions are compatible. :param imp_version: The version implemented :param version: The version requested by an incoming message. """ version_parts = version.split('.') imp_version_parts = imp_version.split('.') try: rev = version_parts[2] except IndexError: rev = 0 try: imp_rev = imp_version_parts[2] except IndexError: imp_rev = 0 if int(version_parts[0]) != int(imp_version_parts[0]): # Major return False if int(version_parts[1]) > int(imp_version_parts[1]): # Minor return False if (int(version_parts[1]) == int(imp_version_parts[1]) and int(rev) > int(imp_rev)): # Revision return False return True oslo.messaging-1.3.0/oslo/__init__.py0000664000175300017540000000122712316527457020703 0ustar jenkinsjenkins00000000000000 # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __import__('pkg_resources').declare_namespace(__name__) oslo.messaging-1.3.0/MANIFEST.in0000664000175300017540000000033612316527457017354 0ustar jenkinsjenkins00000000000000include AUTHORS include ChangeLog include LICENSE include README.rst include tox.ini recursive-include tests * recursive-include tools * recursive-include doc * exclude .gitignore exclude .gitreview global-exclude *.pyc oslo.messaging-1.3.0/README.rst0000664000175300017540000000035712316527457017310 0ustar jenkinsjenkins00000000000000Oslo Messaging Library ====================== The Oslo messaging API supports RPC and notifications over a number of different messaging transports. See also: `Library Documentation `_ oslo.messaging-1.3.0/.testr.conf0000664000175300017540000000032212316527457017677 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ . 
$LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list oslo.messaging-1.3.0/test-requirements.txt0000664000175300017540000000067612316527457022066 0ustar jenkinsjenkins00000000000000# Hacking already pins down pep8, pyflakes and flake8 hacking>=0.8.0,<0.9 discover fixtures>=0.3.14 mock>=1.0 mox3>=0.7.0 python-subunit>=0.0.18 testrepository>=0.0.18 testscenarios>=0.4 testtools>=0.9.34 oslotest # for test_qpid qpid-python # when we can require tox>= 1.4, this can go into tox.ini: # [testenv:cover] # deps = {[testenv]deps} coverage coverage>=3.6 # this is required for the docs build jobs sphinx>=1.1.2,<1.2 oslosphinx oslo.messaging-1.3.0/oslo.messaging.egg-info/0000775000175300017540000000000012316527535022233 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/oslo.messaging.egg-info/namespace_packages.txt0000664000175300017540000000000512316527535026561 0ustar jenkinsjenkins00000000000000oslo oslo.messaging-1.3.0/oslo.messaging.egg-info/PKG-INFO0000664000175300017540000000172012316527535023330 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: oslo.messaging Version: 1.3.0 Summary: Oslo Messaging API Home-page: https://launchpad.net/oslo Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Oslo Messaging Library ====================== The Oslo messaging API supports RPC and notifications over a number of different messaging transports. See also: `Library Documentation `_ Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 oslo.messaging-1.3.0/oslo.messaging.egg-info/dependency_links.txt0000664000175300017540000000000112316527535026301 0ustar jenkinsjenkins00000000000000 oslo.messaging-1.3.0/oslo.messaging.egg-info/top_level.txt0000664000175300017540000000000512316527535024760 0ustar jenkinsjenkins00000000000000oslo oslo.messaging-1.3.0/oslo.messaging.egg-info/SOURCES.txt0000664000175300017540000000631212316527535024121 0ustar jenkinsjenkins00000000000000.testr.conf AUTHORS CONTRIBUTING.rst ChangeLog LICENSE MANIFEST.in README.rst openstack-common.conf requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/source/conf.py doc/source/conffixture.rst doc/source/exceptions.rst doc/source/index.rst doc/source/notification_listener.rst doc/source/notifier.rst doc/source/rpcclient.rst doc/source/serializer.rst doc/source/server.rst doc/source/target.rst doc/source/transport.rst etc/routing_notifier.yaml.sample oslo/__init__.py oslo.messaging.egg-info/PKG-INFO oslo.messaging.egg-info/SOURCES.txt oslo.messaging.egg-info/dependency_links.txt oslo.messaging.egg-info/entry_points.txt oslo.messaging.egg-info/namespace_packages.txt oslo.messaging.egg-info/not-zip-safe oslo.messaging.egg-info/requires.txt oslo.messaging.egg-info/top_level.txt oslo/messaging/__init__.py oslo/messaging/_utils.py oslo/messaging/conffixture.py oslo/messaging/exceptions.py oslo/messaging/localcontext.py oslo/messaging/opts.py oslo/messaging/serializer.py oslo/messaging/server.py oslo/messaging/target.py oslo/messaging/transport.py oslo/messaging/_cmd/__init__.py oslo/messaging/_cmd/zmq_receiver.py oslo/messaging/_drivers/__init__.py 
oslo/messaging/_drivers/amqp.py oslo/messaging/_drivers/amqpdriver.py oslo/messaging/_drivers/base.py oslo/messaging/_drivers/common.py oslo/messaging/_drivers/impl_fake.py oslo/messaging/_drivers/impl_qpid.py oslo/messaging/_drivers/impl_rabbit.py oslo/messaging/_drivers/impl_zmq.py oslo/messaging/_drivers/matchmaker.py oslo/messaging/_drivers/matchmaker_redis.py oslo/messaging/_drivers/matchmaker_ring.py oslo/messaging/_drivers/pool.py oslo/messaging/_executors/__init__.py oslo/messaging/_executors/base.py oslo/messaging/_executors/impl_blocking.py oslo/messaging/_executors/impl_eventlet.py oslo/messaging/notify/__init__.py oslo/messaging/notify/_impl_log.py oslo/messaging/notify/_impl_messaging.py oslo/messaging/notify/_impl_noop.py oslo/messaging/notify/_impl_routing.py oslo/messaging/notify/_impl_test.py oslo/messaging/notify/dispatcher.py oslo/messaging/notify/listener.py oslo/messaging/notify/log_handler.py oslo/messaging/notify/logger.py oslo/messaging/notify/notifier.py oslo/messaging/openstack/__init__.py oslo/messaging/openstack/common/__init__.py oslo/messaging/openstack/common/excutils.py oslo/messaging/openstack/common/gettextutils.py oslo/messaging/openstack/common/importutils.py oslo/messaging/openstack/common/jsonutils.py oslo/messaging/openstack/common/network_utils.py oslo/messaging/openstack/common/timeutils.py oslo/messaging/openstack/common/py3kcompat/__init__.py oslo/messaging/openstack/common/py3kcompat/urlutils.py oslo/messaging/rpc/__init__.py oslo/messaging/rpc/client.py oslo/messaging/rpc/dispatcher.py oslo/messaging/rpc/server.py tests/__init__.py tests/test_exception_serialization.py tests/test_executor.py tests/test_expected_exceptions.py tests/test_log_handler.py tests/test_notifier.py tests/test_notifier_logger.py tests/test_notify_dispatcher.py tests/test_notify_listener.py tests/test_opts.py tests/test_pool.py tests/test_qpid.py tests/test_rabbit.py tests/test_rpc_client.py tests/test_rpc_dispatcher.py tests/test_rpc_server.py tests/test_target.py tests/test_transport.py tests/test_urls.py tests/test_utils.py tests/utils.pyoslo.messaging-1.3.0/oslo.messaging.egg-info/entry_points.txt0000664000175300017540000000173712316527535025541 0ustar jenkinsjenkins00000000000000[oslo.config.opts] oslo.messaging = oslo.messaging.opts:list_opts [oslo.messaging.notify.drivers] log = oslo.messaging.notify._impl_log:LogDriver messagingv2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver noop = oslo.messaging.notify._impl_noop:NoOpDriver routing = oslo.messaging.notify._impl_routing:RoutingDriver test = oslo.messaging.notify._impl_test:TestDriver messaging = oslo.messaging.notify._impl_messaging:MessagingDriver [oslo.messaging.executors] blocking = oslo.messaging._executors.impl_blocking:BlockingExecutor eventlet = oslo.messaging._executors.impl_eventlet:EventletExecutor [console_scripts] oslo-messaging-zmq-receiver = oslo.messaging._cmd.zmq_receiver:main [oslo.messaging.drivers] qpid = oslo.messaging._drivers.impl_qpid:QpidDriver zmq = oslo.messaging._drivers.impl_zmq:ZmqDriver kombu = oslo.messaging._drivers.impl_rabbit:RabbitDriver rabbit = oslo.messaging._drivers.impl_rabbit:RabbitDriver fake = oslo.messaging._drivers.impl_fake:FakeDriver oslo.messaging-1.3.0/oslo.messaging.egg-info/not-zip-safe0000664000175300017540000000000112316527462024460 0ustar jenkinsjenkins00000000000000 oslo.messaging-1.3.0/oslo.messaging.egg-info/requires.txt0000664000175300017540000000016312316527535024633 0ustar jenkinsjenkins00000000000000oslo.config>=1.2.0 stevedore>=0.14 
iso8601>=0.1.9 six>=1.5.2 eventlet>=0.13.0 Babel>=1.3 PyYAML>=3.1.0 kombu>=2.4.8oslo.messaging-1.3.0/tests/0000775000175300017540000000000012316527535016753 5ustar jenkinsjenkins00000000000000oslo.messaging-1.3.0/tests/test_transport.py0000664000175300017540000002606012316527457022427 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from mox3 import mox from oslo.config import cfg import six from stevedore import driver import testscenarios from oslo import messaging from oslo.messaging import transport from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class _FakeDriver(object): def __init__(self, conf): self.conf = conf def send(self, *args, **kwargs): pass def send_notification(self, *args, **kwargs): pass def listen(self, target): pass class _FakeManager(object): def __init__(self, driver): self.driver = driver class GetTransportTestCase(test_utils.BaseTestCase): scenarios = [ ('rpc_backend', dict(url=None, transport_url=None, rpc_backend='testbackend', control_exchange=None, allowed=None, aliases=None, expect=dict(backend='testbackend', exchange=None, url='testbackend:', allowed=[]))), ('transport_url', dict(url=None, transport_url='testtransport:', rpc_backend=None, control_exchange=None, allowed=None, aliases=None, expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ('url_param', dict(url='testtransport:', transport_url=None, rpc_backend=None, control_exchange=None, allowed=None, aliases=None, expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ('control_exchange', dict(url=None, transport_url=None, rpc_backend='testbackend', control_exchange='testexchange', allowed=None, aliases=None, expect=dict(backend='testbackend', exchange='testexchange', url='testbackend:', allowed=[]))), ('allowed_remote_exmods', dict(url=None, transport_url=None, rpc_backend='testbackend', control_exchange=None, allowed=['foo', 'bar'], aliases=None, expect=dict(backend='testbackend', exchange=None, url='testbackend:', allowed=['foo', 'bar']))), ('rpc_backend_aliased', dict(url=None, transport_url=None, rpc_backend='testfoo', control_exchange=None, allowed=None, aliases=dict(testfoo='testbackend'), expect=dict(backend='testbackend', exchange=None, url='testbackend:', allowed=[]))), ('transport_url_aliased', dict(url=None, transport_url='testfoo:', rpc_backend=None, control_exchange=None, allowed=None, aliases=dict(testfoo='testtransport'), expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ('url_param_aliased', dict(url='testfoo:', transport_url=None, rpc_backend=None, control_exchange=None, allowed=None, aliases=dict(testfoo='testtransport'), expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ] def test_get_transport(self): self.config(rpc_backend=self.rpc_backend, control_exchange=self.control_exchange, transport_url=self.transport_url) 
self.mox.StubOutWithMock(driver, 'DriverManager') invoke_args = [self.conf, messaging.TransportURL.parse(self.conf, self.expect['url'])] invoke_kwds = dict(default_exchange=self.expect['exchange'], allowed_remote_exmods=self.expect['allowed']) drvr = _FakeDriver(self.conf) driver.DriverManager('oslo.messaging.drivers', self.expect['backend'], invoke_on_load=True, invoke_args=invoke_args, invoke_kwds=invoke_kwds).\ AndReturn(_FakeManager(drvr)) self.mox.ReplayAll() kwargs = dict(url=self.url) if self.allowed is not None: kwargs['allowed_remote_exmods'] = self.allowed if self.aliases is not None: kwargs['aliases'] = self.aliases transport_ = messaging.get_transport(self.conf, **kwargs) self.assertIsNotNone(transport_) self.assertIs(transport_.conf, self.conf) self.assertIs(transport_._driver, drvr) class GetTransportSadPathTestCase(test_utils.BaseTestCase): scenarios = [ ('invalid_transport_url', dict(url=None, transport_url='invalid', rpc_backend=None, ex=dict(cls=messaging.InvalidTransportURL, msg_contains='No scheme specified', url='invalid'))), ('invalid_url_param', dict(url='invalid', transport_url=None, rpc_backend=None, ex=dict(cls=messaging.InvalidTransportURL, msg_contains='No scheme specified', url='invalid'))), ('driver_load_failure', dict(url=None, transport_url=None, rpc_backend='testbackend', ex=dict(cls=messaging.DriverLoadFailure, msg_contains='Failed to load', driver='testbackend'))), ] def test_get_transport_sad(self): self.config(rpc_backend=self.rpc_backend, transport_url=self.transport_url) if self.rpc_backend: self.mox.StubOutWithMock(driver, 'DriverManager') invoke_args = [self.conf, messaging.TransportURL.parse(self.conf, self.url)] invoke_kwds = dict(default_exchange='openstack', allowed_remote_exmods=[]) driver.DriverManager('oslo.messaging.drivers', self.rpc_backend, invoke_on_load=True, invoke_args=invoke_args, invoke_kwds=invoke_kwds).\ AndRaise(RuntimeError()) self.mox.ReplayAll() try: messaging.get_transport(self.conf, url=self.url) self.assertFalse(True) except Exception as ex: ex_cls = self.ex.pop('cls') ex_msg_contains = self.ex.pop('msg_contains') self.assertIsInstance(ex, messaging.MessagingException) self.assertIsInstance(ex, ex_cls) self.assertIn(ex_msg_contains, six.text_type(ex)) for k, v in self.ex.items(): self.assertTrue(hasattr(ex, k)) self.assertEqual(str(getattr(ex, k)), v) # FIXME(markmc): this could be used elsewhere class _SetDefaultsFixture(fixtures.Fixture): def __init__(self, set_defaults, opts, *names): super(_SetDefaultsFixture, self).__init__() self.set_defaults = set_defaults self.opts = opts self.names = names def setUp(self): super(_SetDefaultsFixture, self).setUp() # FIXME(markmc): this comes from Id5c1f3ba def first(seq, default=None, key=None): if key is None: key = bool return next(six.moves.filter(key, seq), default) def default(opts, name): return first(opts, key=lambda o: o.name == name).default orig_defaults = {} for n in self.names: orig_defaults[n] = default(self.opts, n) def restore_defaults(): self.set_defaults(**orig_defaults) self.addCleanup(restore_defaults) class TestSetDefaults(test_utils.BaseTestCase): def setUp(self): super(TestSetDefaults, self).setUp(conf=cfg.ConfigOpts()) self.useFixture(_SetDefaultsFixture(messaging.set_transport_defaults, transport._transport_opts, 'control_exchange')) def test_set_default_control_exchange(self): messaging.set_transport_defaults(control_exchange='foo') self.mox.StubOutWithMock(driver, 'DriverManager') invoke_kwds = mox.ContainsKeyValue('default_exchange', 'foo') 
driver.DriverManager(mox.IgnoreArg(), mox.IgnoreArg(), invoke_on_load=mox.IgnoreArg(), invoke_args=mox.IgnoreArg(), invoke_kwds=invoke_kwds).\ AndReturn(_FakeManager(_FakeDriver(self.conf))) self.mox.ReplayAll() messaging.get_transport(self.conf) class TestTransportMethodArgs(test_utils.BaseTestCase): _target = messaging.Target(topic='topic', server='server') def test_send_defaults(self): t = transport.Transport(_FakeDriver(cfg.CONF)) self.mox.StubOutWithMock(t._driver, 'send') t._driver.send(self._target, 'ctxt', 'message', wait_for_reply=None, timeout=None) self.mox.ReplayAll() t._send(self._target, 'ctxt', 'message') def test_send_all_args(self): t = transport.Transport(_FakeDriver(cfg.CONF)) self.mox.StubOutWithMock(t._driver, 'send') t._driver.send(self._target, 'ctxt', 'message', wait_for_reply='wait_for_reply', timeout='timeout') self.mox.ReplayAll() t._send(self._target, 'ctxt', 'message', wait_for_reply='wait_for_reply', timeout='timeout') def test_send_notification(self): t = transport.Transport(_FakeDriver(cfg.CONF)) self.mox.StubOutWithMock(t._driver, 'send_notification') t._driver.send_notification(self._target, 'ctxt', 'message', 1.0) self.mox.ReplayAll() t._send_notification(self._target, 'ctxt', 'message', version=1.0) def test_listen(self): t = transport.Transport(_FakeDriver(cfg.CONF)) self.mox.StubOutWithMock(t._driver, 'listen') t._driver.listen(self._target) self.mox.ReplayAll() t._listen(self._target) oslo.messaging-1.3.0/tests/test_target.py0000664000175300017540000001541712316527457021665 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
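# NOTE: editorial sketch of the Target behaviour exercised in the tests
# below -- calling a Target returns a copy with the given overrides applied,
# leaving the original untouched:
#
#     target = messaging.Target(topic='testtopic', version='3.4')
#     worker = target(server='testserver')  # copy with server set
#     assert worker.topic == 'testtopic' and worker.server == 'testserver'
#     assert target.server is None          # original unchanged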
import testscenarios from oslo import messaging from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TargetConstructorTestCase(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(kwargs=dict())), ('exchange', dict(kwargs=dict(exchange='testexchange'))), ('topic', dict(kwargs=dict(topic='testtopic'))), ('namespace', dict(kwargs=dict(namespace='testnamespace'))), ('version', dict(kwargs=dict(version='3.4'))), ('server', dict(kwargs=dict(server='testserver'))), ('fanout', dict(kwargs=dict(fanout=True))), ] def test_constructor(self): target = messaging.Target(**self.kwargs) for k in self.kwargs: self.assertEqual(getattr(target, k), self.kwargs[k]) for k in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: if k in self.kwargs: continue self.assertIsNone(getattr(target, k)) class TargetCallableTestCase(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(attrs=dict(), kwargs=dict(), vals=dict())), ('exchange_attr', dict(attrs=dict(exchange='testexchange'), kwargs=dict(), vals=dict(exchange='testexchange'))), ('exchange_arg', dict(attrs=dict(), kwargs=dict(exchange='testexchange'), vals=dict(exchange='testexchange'))), ('topic_attr', dict(attrs=dict(topic='testtopic'), kwargs=dict(), vals=dict(topic='testtopic'))), ('topic_arg', dict(attrs=dict(), kwargs=dict(topic='testtopic'), vals=dict(topic='testtopic'))), ('namespace_attr', dict(attrs=dict(namespace='testnamespace'), kwargs=dict(), vals=dict(namespace='testnamespace'))), ('namespace_arg', dict(attrs=dict(), kwargs=dict(namespace='testnamespace'), vals=dict(namespace='testnamespace'))), ('version_attr', dict(attrs=dict(version='3.4'), kwargs=dict(), vals=dict(version='3.4'))), ('version_arg', dict(attrs=dict(), kwargs=dict(version='3.4'), vals=dict(version='3.4'))), ('server_attr', dict(attrs=dict(server='testserver'), kwargs=dict(), vals=dict(server='testserver'))), ('server_arg', dict(attrs=dict(), kwargs=dict(server='testserver'), vals=dict(server='testserver'))), ('fanout_attr', dict(attrs=dict(fanout=True), kwargs=dict(), vals=dict(fanout=True))), ('fanout_arg', dict(attrs=dict(), kwargs=dict(fanout=True), vals=dict(fanout=True))), ] def test_callable(self): target = messaging.Target(**self.attrs) target = target(**self.kwargs) for k in self.vals: self.assertEqual(getattr(target, k), self.vals[k]) for k in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: if k in self.vals: continue self.assertIsNone(getattr(target, k)) class TargetReprTestCase(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(kwargs=dict(), repr='')), ('exchange', dict(kwargs=dict(exchange='testexchange'), repr='exchange=testexchange')), ('topic', dict(kwargs=dict(topic='testtopic'), repr='topic=testtopic')), ('namespace', dict(kwargs=dict(namespace='testnamespace'), repr='namespace=testnamespace')), ('version', dict(kwargs=dict(version='3.4'), repr='version=3.4')), ('server', dict(kwargs=dict(server='testserver'), repr='server=testserver')), ('fanout', dict(kwargs=dict(fanout=True), repr='fanout=True')), ('exchange_and_fanout', dict(kwargs=dict(exchange='testexchange', fanout=True), repr='exchange=testexchange, ' 'fanout=True')), ] def test_repr(self): target = messaging.Target(**self.kwargs) self.assertEqual(str(target), '') _notset = object() class EqualityTestCase(test_utils.BaseTestCase): @classmethod def generate_scenarios(cls): attr = [ ('exchange', dict(attr='exchange')), ('topic', dict(attr='topic')), ('namespace', dict(attr='namespace')), ('version', 
dict(attr='version')), ('server', dict(attr='server')), ('fanout', dict(attr='fanout')), ] a = [ ('a_notset', dict(a_value=_notset)), ('a_none', dict(a_value=None)), ('a_empty', dict(a_value='')), ('a_foo', dict(a_value='foo')), ('a_bar', dict(a_value='bar')), ] b = [ ('b_notset', dict(b_value=_notset)), ('b_none', dict(b_value=None)), ('b_empty', dict(b_value='')), ('b_foo', dict(b_value='foo')), ('b_bar', dict(b_value='bar')), ] cls.scenarios = testscenarios.multiply_scenarios(attr, a, b) for s in cls.scenarios: s[1]['equals'] = (s[1]['a_value'] == s[1]['b_value']) def test_equality(self): a_kwargs = {self.attr: self.a_value} b_kwargs = {self.attr: self.b_value} a = messaging.Target(**a_kwargs) b = messaging.Target(**b_kwargs) if self.equals: self.assertEqual(a, b) self.assertFalse(a != b) else: self.assertNotEqual(a, b) self.assertFalse(a == b) EqualityTestCase.generate_scenarios() oslo.messaging-1.3.0/tests/test_notify_dispatcher.py0000664000175300017540000001327312316527457024113 0ustar jenkinsjenkins00000000000000 # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import mock import testscenarios from oslo import messaging from oslo.messaging.notify import dispatcher as notify_dispatcher from oslo.messaging.openstack.common import timeutils from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios notification_msg = dict( publisher_id="publisher_id", event_type="compute.start", payload={"info": "fuu"}, message_id="uuid", timestamp=str(timeutils.utcnow()) ) class TestDispatcher(test_utils.BaseTestCase): scenarios = [ ('no_endpoints', dict(endpoints=[], endpoints_expect_calls=[], priority='info', ex=None, return_value=messaging.NotificationResult.HANDLED)), ('one_endpoints', dict(endpoints=[['warn']], endpoints_expect_calls=['warn'], priority='warn', ex=None, return_value=messaging.NotificationResult.HANDLED)), ('two_endpoints_only_one_match', dict(endpoints=[['warn'], ['info']], endpoints_expect_calls=[None, 'info'], priority='info', ex=None, return_value=messaging.NotificationResult.HANDLED)), ('two_endpoints_both_match', dict(endpoints=[['debug', 'info'], ['info', 'debug']], endpoints_expect_calls=['debug', 'debug'], priority='debug', ex=None, return_value=messaging.NotificationResult.HANDLED)), ('no_return_value', dict(endpoints=[['warn']], endpoints_expect_calls=['warn'], priority='warn', ex=None, return_value=None)), ('requeue', dict(endpoints=[['debug', 'warn']], endpoints_expect_calls=['debug'], priority='debug', msg=notification_msg, ex=None, return_value=messaging.NotificationResult.REQUEUE)), ('exception', dict(endpoints=[['debug', 'warn']], endpoints_expect_calls=['debug'], priority='debug', msg=notification_msg, ex=Exception, return_value=messaging.NotificationResult.HANDLED)), ] def test_dispatcher(self): endpoints = [] for endpoint_methods in self.endpoints: e = mock.Mock(spec=endpoint_methods) endpoints.append(e) for m in endpoint_methods: method = getattr(e, m) if self.ex: method.side_effect = self.ex() else: 
method.return_value = self.return_value msg = notification_msg.copy() msg['priority'] = self.priority targets = [messaging.Target(topic='notifications')] dispatcher = notify_dispatcher.NotificationDispatcher( targets, endpoints, None, allow_requeue=True) # check it listen on wanted topics self.assertEqual(sorted(dispatcher._targets_priorities), sorted(set((targets[0], prio) for prio in itertools.chain.from_iterable( self.endpoints)))) incoming = mock.Mock(ctxt={}, message=msg) with dispatcher(incoming) as callback: callback() # check endpoint callbacks are called or not for i, endpoint_methods in enumerate(self.endpoints): for m in endpoint_methods: if m == self.endpoints_expect_calls[i]: method = getattr(endpoints[i], m) expected = [mock.call({}, msg['publisher_id'], msg['event_type'], msg['payload'], { 'timestamp': mock.ANY, 'message_id': mock.ANY })] self.assertEqual(method.call_args_list, expected) else: self.assertEqual(endpoints[i].call_count, 0) if self.ex: self.assertEqual(incoming.acknowledge.call_count, 1) self.assertEqual(incoming.requeue.call_count, 0) elif self.return_value == messaging.NotificationResult.HANDLED \ or self.return_value is None: self.assertEqual(incoming.acknowledge.call_count, 1) self.assertEqual(incoming.requeue.call_count, 0) elif self.return_value == messaging.NotificationResult.REQUEUE: self.assertEqual(incoming.acknowledge.call_count, 0) self.assertEqual(incoming.requeue.call_count, 1) @mock.patch('oslo.messaging.notify.dispatcher.LOG') def test_dispatcher_unknown_prio(self, mylog): msg = notification_msg.copy() msg['priority'] = 'what???' dispatcher = notify_dispatcher.NotificationDispatcher( [mock.Mock()], [mock.Mock()], None, allow_requeue=True) with dispatcher(mock.Mock(ctxt={}, message=msg)) as callback: callback() mylog.warning.assert_called_once_with('Unknown priority "what???"') oslo.messaging-1.3.0/tests/test_exception_serialization.py0000664000175300017540000002574012316527457025332 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
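# NOTE: editorial sketch of a notification endpoint like those mocked in the
# dispatcher tests above -- the method name selects the priority handled and
# the return value drives ack/requeue (can_handle is a hypothetical check):
#
#     class ErrorEndpoint(object):
#         def error(self, ctxt, publisher_id, event_type, payload, metadata):
#             if not can_handle(payload):
#                 return messaging.NotificationResult.REQUEUE
#             return messaging.NotificationResult.HANDLED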
import sys import six import testscenarios from oslo import messaging from oslo.messaging._drivers import common as exceptions from oslo.messaging.openstack.common import jsonutils from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' class NovaStyleException(Exception): format = 'I am Nova' def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: message = self.format % kwargs super(NovaStyleException, self).__init__(message) class KwargsStyleException(NovaStyleException): format = 'I am %(who)s' def add_remote_postfix(ex): ex_type = type(ex) message = str(ex) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), {'__str__': str_override, '__unicode__': str_override}) new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__ try: ex.__class__ = new_ex_type except TypeError: ex.args = (message,) + ex.args[1:] return ex class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase): _log_failure = [ ('log_failure', dict(log_failure=True)), ('do_not_log_failure', dict(log_failure=False)), ] _add_remote = [ ('add_remote', dict(add_remote=True)), ('do_not_add_remote', dict(add_remote=False)), ] _exception_types = [ ('bog_standard', dict(cls=Exception, args=['test'], kwargs={}, clsname='Exception', modname=EXCEPTIONS_MODULE, msg='test')), ('nova_style', dict(cls=NovaStyleException, args=[], kwargs={}, clsname='NovaStyleException', modname=__name__, msg='I am Nova')), ('nova_style_with_msg', dict(cls=NovaStyleException, args=['testing'], kwargs={}, clsname='NovaStyleException', modname=__name__, msg='testing')), ('kwargs_style', dict(cls=KwargsStyleException, args=[], kwargs={'who': 'Oslo'}, clsname='KwargsStyleException', modname=__name__, msg='I am Oslo')), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure, cls._add_remote, cls._exception_types) def setUp(self): super(SerializeRemoteExceptionTestCase, self).setUp() def test_serialize_remote_exception(self): errors = [] def stub_error(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] errors.append(str(msg) % a) self.stubs.Set(exceptions.LOG, 'error', stub_error) try: try: raise self.cls(*self.args, **self.kwargs) except Exception as ex: cls_error = ex if self.add_remote: ex = add_remote_postfix(ex) raise ex except Exception: exc_info = sys.exc_info() serialized = exceptions.serialize_remote_exception( exc_info, log_failure=self.log_failure) failure = jsonutils.loads(serialized) self.assertEqual(failure['class'], self.clsname, failure) self.assertEqual(failure['module'], self.modname) self.assertEqual(failure['message'], self.msg) self.assertEqual(failure['args'], [self.msg]) self.assertEqual(failure['kwargs'], self.kwargs) # Note: _Remote prefix not stripped from tracebacks tb = cls_error.__class__.__name__ + ': ' + self.msg self.assertIn(tb, ''.join(failure['tb'])) if self.log_failure: self.assertTrue(len(errors) > 0, errors) else: self.assertEqual(len(errors), 0, errors) SerializeRemoteExceptionTestCase.generate_scenarios() class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase): _standard_allowed = [__name__] scenarios = [ ('bog_standard', dict(allowed=_standard_allowed, clsname='Exception', modname=EXCEPTIONS_MODULE, cls=Exception, args=['test'], kwargs={}, str='test\ntraceback\ntraceback\n', remote_name='Exception', remote_args=('test\ntraceback\ntraceback\n', ), 
remote_kwargs={})), ('nova_style', dict(allowed=_standard_allowed, clsname='NovaStyleException', modname=__name__, cls=NovaStyleException, args=[], kwargs={}, str='test\ntraceback\ntraceback\n', remote_name='NovaStyleException_Remote', remote_args=('I am Nova', ), remote_kwargs={})), ('nova_style_with_msg', dict(allowed=_standard_allowed, clsname='NovaStyleException', modname=__name__, cls=NovaStyleException, args=['testing'], kwargs={}, str='test\ntraceback\ntraceback\n', remote_name='NovaStyleException_Remote', remote_args=('testing', ), remote_kwargs={})), ('kwargs_style', dict(allowed=_standard_allowed, clsname='KwargsStyleException', modname=__name__, cls=KwargsStyleException, args=[], kwargs={'who': 'Oslo'}, str='test\ntraceback\ntraceback\n', remote_name='KwargsStyleException_Remote', remote_args=('I am Oslo', ), remote_kwargs={})), ('not_allowed', dict(allowed=[], clsname='NovaStyleException', modname=__name__, cls=messaging.RemoteError, args=[], kwargs={}, str=("Remote error: NovaStyleException test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: NovaStyleException test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'NovaStyleException', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('unknown_module', dict(allowed=['notexist'], clsname='Exception', modname='notexist', cls=messaging.RemoteError, args=[], kwargs={}, str=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'Exception', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('unknown_exception', dict(allowed=[], clsname='FarcicalError', modname=EXCEPTIONS_MODULE, cls=messaging.RemoteError, args=[], kwargs={}, str=("Remote error: FarcicalError test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: FarcicalError test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'FarcicalError', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('unknown_kwarg', dict(allowed=[], clsname='Exception', modname=EXCEPTIONS_MODULE, cls=messaging.RemoteError, args=[], kwargs={'foobar': 'blaa'}, str=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'Exception', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('system_exit', dict(allowed=[], clsname='SystemExit', modname=EXCEPTIONS_MODULE, cls=messaging.RemoteError, args=[], kwargs={}, str=("Remote error: SystemExit test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: SystemExit test\n" "[%r]." 
% u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'SystemExit', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ] def test_deserialize_remote_exception(self): failure = { 'class': self.clsname, 'module': self.modname, 'message': 'test', 'tb': ['traceback\ntraceback\n'], 'args': self.args, 'kwargs': self.kwargs, } serialized = jsonutils.dumps(failure) ex = exceptions.deserialize_remote_exception(serialized, self.allowed) self.assertIsInstance(ex, self.cls) self.assertEqual(ex.__class__.__name__, self.remote_name) self.assertEqual(six.text_type(ex), self.str) if hasattr(self, 'msg'): self.assertEqual(six.text_type(ex), self.msg) self.assertEqual(ex.args, (self.msg,) + self.remote_args) else: self.assertEqual(ex.args, self.remote_args) oslo.messaging-1.3.0/tests/test_expected_exceptions.py0000664000175300017540000000402612316527457024433 0ustar jenkinsjenkins00000000000000 # Copyright 2012 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import messaging from tests import utils as test_utils class TestExpectedExceptions(test_utils.BaseTestCase): def test_exception(self): e = None try: try: raise ValueError() except Exception: raise messaging.ExpectedException() except messaging.ExpectedException as e: self.assertIsInstance(e, messaging.ExpectedException) self.assertTrue(hasattr(e, 'exc_info')) self.assertIsInstance(e.exc_info[1], ValueError) def test_decorator_expected(self): class FooException(Exception): pass @messaging.expected_exceptions(FooException) def naughty(): raise FooException() self.assertRaises(messaging.ExpectedException, naughty) def test_decorator_expected_subclass(self): class FooException(Exception): pass class BarException(FooException): pass @messaging.expected_exceptions(FooException) def naughty(): raise BarException() self.assertRaises(messaging.ExpectedException, naughty) def test_decorator_unexpected(self): class FooException(Exception): pass @messaging.expected_exceptions(FooException) def really_naughty(): raise ValueError() self.assertRaises(ValueError, really_naughty) oslo.messaging-1.3.0/tests/test_rabbit.py0000664000175300017540000005372012316527457021641 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
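# NOTE: editorial sketch of the expected_exceptions decorator exercised in
# the tests above -- listed exception types (including subclasses) are
# wrapped in ExpectedException so the server can reply with the failure
# without logging a full traceback (self._items is a hypothetical store):
#
#     @messaging.expected_exceptions(KeyError)
#     def get_item(self, ctxt, key):
#         return self._items[key]  # a missing key becomes ExpectedException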
import datetime import sys import threading import uuid import fixtures import kombu import testscenarios from oslo import messaging from oslo.messaging._drivers import amqpdriver from oslo.messaging._drivers import common as driver_common from oslo.messaging._drivers import impl_rabbit as rabbit_driver from oslo.messaging.openstack.common import jsonutils from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestRabbitDriverLoad(test_utils.BaseTestCase): def setUp(self): super(TestRabbitDriverLoad, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.messaging_conf.in_memory = True def test_driver_load(self): transport = messaging.get_transport(self.conf) self.assertIsInstance(transport._driver, rabbit_driver.RabbitDriver) class TestRabbitTransportURL(test_utils.BaseTestCase): scenarios = [ ('none', dict(url=None, expected=None)), ('empty', dict(url='rabbit:///', expected=dict(virtual_host=''))), ('localhost', dict(url='rabbit://localhost/', expected=dict(hostname='localhost', username='', password='', virtual_host=''))), ('virtual_host', dict(url='rabbit:///vhost', expected=dict(virtual_host='vhost'))), ('no_creds', dict(url='rabbit://host/virtual_host', expected=dict(hostname='host', username='', password='', virtual_host='virtual_host'))), ('no_port', dict(url='rabbit://user:password@host/virtual_host', expected=dict(hostname='host', username='user', password='password', virtual_host='virtual_host'))), ('full_url', dict(url='rabbit://user:password@host:10/virtual_host', expected=dict(hostname='host', port=10, username='user', password='password', virtual_host='virtual_host'))), ] def setUp(self): super(TestRabbitTransportURL, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.messaging_conf.in_memory = True self._server_params = [] cnx_init = rabbit_driver.Connection.__init__ def record_params(cnx, conf, server_params=None): self._server_params.append(server_params) return cnx_init(cnx, conf, server_params) def dummy_send(cnx, topic, msg, timeout=None): pass self.stubs.Set(rabbit_driver.Connection, '__init__', record_params) self.stubs.Set(rabbit_driver.Connection, 'topic_send', dummy_send) self._driver = messaging.get_transport(self.conf, self.url)._driver self._target = messaging.Target(topic='testtopic') def test_transport_url_listen(self): self._driver.listen(self._target) self.assertEqual(self._server_params[0], self.expected) def test_transport_url_listen_for_notification(self): self._driver.listen_for_notifications( [(messaging.Target(topic='topic'), 'info')]) self.assertEqual(self._server_params[0], self.expected) def test_transport_url_send(self): self._driver.send(self._target, {}, {}) self.assertEqual(self._server_params[0], self.expected) class TestSendReceive(test_utils.BaseTestCase): _n_senders = [ ('single_sender', dict(n_senders=1)), ('multiple_senders', dict(n_senders=10)), ] _context = [ ('empty_context', dict(ctxt={})), ('with_context', dict(ctxt={'user': 'mark'})), ] _reply = [ ('rx_id', dict(rx_id=True, reply=None)), ('none', dict(rx_id=False, reply=None)), ('empty_list', dict(rx_id=False, reply=[])), ('empty_dict', dict(rx_id=False, reply={})), ('false', dict(rx_id=False, reply=False)), ('zero', dict(rx_id=False, reply=0)), ] _failure = [ ('success', dict(failure=False)), ('failure', dict(failure=True, expected=False)), ('expected_failure', dict(failure=True, expected=True)), ] _timeout = [ ('no_timeout', dict(timeout=None)), ('timeout', dict(timeout=0.01)), # FIXME(markmc): timeout=0 is 
broken? ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders, cls._context, cls._reply, cls._failure, cls._timeout) def setUp(self): super(TestSendReceive, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.messaging_conf.in_memory = True def test_send_receive(self): transport = messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver target = messaging.Target(topic='testtopic') listener = driver.listen(target) senders = [] replies = [] msgs = [] errors = [] def stub_error(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] errors.append(str(msg) % a) self.stubs.Set(driver_common.LOG, 'error', stub_error) def send_and_wait_for_reply(i): try: replies.append(driver.send(target, self.ctxt, {'tx_id': i}, wait_for_reply=True, timeout=self.timeout)) self.assertFalse(self.failure) self.assertIsNone(self.timeout) except (ZeroDivisionError, messaging.MessagingTimeout) as e: replies.append(e) self.assertTrue(self.failure or self.timeout is not None) while len(senders) < self.n_senders: senders.append(threading.Thread(target=send_and_wait_for_reply, args=(len(senders), ))) for i in range(len(senders)): senders[i].start() received = listener.poll() self.assertIsNotNone(received) self.assertEqual(received.ctxt, self.ctxt) self.assertEqual(received.message, {'tx_id': i}) msgs.append(received) # reply in reverse, except reply to the first guy second from last order = list(range(len(senders)-1, -1, -1)) if len(order) > 1: order[-1], order[-2] = order[-2], order[-1] for i in order: if self.timeout is None: if self.failure: try: raise ZeroDivisionError except Exception: failure = sys.exc_info() msgs[i].reply(failure=failure, log_failure=not self.expected) elif self.rx_id: msgs[i].reply({'rx_id': i}) else: msgs[i].reply(self.reply) senders[i].join() self.assertEqual(len(replies), len(senders)) for i, reply in enumerate(replies): if self.timeout is not None: self.assertIsInstance(reply, messaging.MessagingTimeout) elif self.failure: self.assertIsInstance(reply, ZeroDivisionError) elif self.rx_id: self.assertEqual(reply, {'rx_id': order[i]}) else: self.assertEqual(reply, self.reply) if not self.timeout and self.failure and not self.expected: self.assertTrue(len(errors) > 0, errors) else: self.assertEqual(len(errors), 0, errors) TestSendReceive.generate_scenarios() class TestRacyWaitForReply(test_utils.BaseTestCase): def setUp(self): super(TestRacyWaitForReply, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.messaging_conf.in_memory = True def test_send_receive(self): transport = messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver target = messaging.Target(topic='testtopic') listener = driver.listen(target) senders = [] replies = [] msgs = [] wait_conditions = [] orig_reply_waiter = amqpdriver.ReplyWaiter.wait def reply_waiter(self, msg_id, timeout): if wait_conditions: with wait_conditions[0]: wait_conditions.pop().wait() return orig_reply_waiter(self, msg_id, timeout) self.stubs.Set(amqpdriver.ReplyWaiter, 'wait', reply_waiter) def send_and_wait_for_reply(i): replies.append(driver.send(target, {}, {'tx_id': i}, wait_for_reply=True, timeout=None)) while len(senders) < 2: t = threading.Thread(target=send_and_wait_for_reply, args=(len(senders), )) t.daemon = True senders.append(t) # Start the first guy, receive his message, but delay his polling notify_condition = threading.Condition() 
wait_conditions.append(notify_condition) senders[0].start() msgs.append(listener.poll()) self.assertEqual(msgs[-1].message, {'tx_id': 0}) # Start the second guy, receive his message senders[1].start() msgs.append(listener.poll()) self.assertEqual(msgs[-1].message, {'tx_id': 1}) # Reply to both in order, making the second thread queue # the reply meant for the first thread msgs[0].reply({'rx_id': 0}) msgs[1].reply({'rx_id': 1}) # Wait for the second thread to finish senders[1].join() # Let the first thread continue with notify_condition: notify_condition.notify() # Wait for the first thread to finish senders[0].join() # Verify replies were received out of order self.assertEqual(len(replies), len(senders)) self.assertEqual(replies[0], {'rx_id': 1}) self.assertEqual(replies[1], {'rx_id': 0}) def _declare_queue(target): connection = kombu.connection.BrokerConnection(transport='memory') # Kludge to speed up tests. connection.transport.polling_interval = 0.0 connection.connect() channel = connection.channel() # work around 'memory' transport bug in 1.1.3 channel._new_queue('ae.undeliver') if target.fanout: exchange = kombu.entity.Exchange(name=target.topic + '_fanout', type='fanout', durable=False, auto_delete=True) queue = kombu.entity.Queue(name=target.topic + '_fanout_12345', channel=channel, exchange=exchange, routing_key=target.topic) if target.server: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) topic = '%s.%s' % (target.topic, target.server) queue = kombu.entity.Queue(name=topic, channel=channel, exchange=exchange, routing_key=topic) else: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) queue = kombu.entity.Queue(name=target.topic, channel=channel, exchange=exchange, routing_key=target.topic) queue.declare() return connection, channel, queue class TestRequestWireFormat(test_utils.BaseTestCase): _target = [ ('topic_target', dict(topic='testtopic', server=None, fanout=False)), ('server_target', dict(topic='testtopic', server='testserver', fanout=False)), # NOTE(markmc): https://github.com/celery/kombu/issues/195 ('fanout_target', dict(topic='testtopic', server=None, fanout=True, skip_msg='Requires kombu>2.5.12 to fix kombu issue #195')), ] _msg = [ ('empty_msg', dict(msg={}, expected={})), ('primitive_msg', dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), ('complex_msg', dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}}, expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), ] _context = [ ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), ('user_project_ctxt', dict(ctxt={'user': 'mark', 'project': 'snarkybunch'}, expected_ctxt={'_context_user': 'mark', '_context_project': 'snarkybunch'})), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._msg, cls._context, cls._target) def setUp(self): super(TestRequestWireFormat, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.messaging_conf.in_memory = True self.uuids = [] self.orig_uuid4 = uuid.uuid4 self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4)) def mock_uuid4(self): self.uuids.append(self.orig_uuid4()) return self.uuids[-1] def test_request_wire_format(self): if hasattr(self, 'skip_msg'): self.skipTest(self.skip_msg) transport = messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver target = messaging.Target(topic=self.topic, server=self.server, fanout=self.fanout) connection, channel, queue = 
_declare_queue(target) self.addCleanup(connection.release) driver.send(target, self.ctxt, self.msg) msgs = [] def callback(msg): msg = channel.message_to_python(msg) msg.ack() msgs.append(msg.payload) queue.consume(callback=callback, consumer_tag='1', nowait=False) connection.drain_events() self.assertEqual(1, len(msgs)) self.assertIn('oslo.message', msgs[0]) received = msgs[0] received['oslo.message'] = jsonutils.loads(received['oslo.message']) # FIXME(markmc): add _msg_id and _reply_q check expected_msg = { '_unique_id': self.uuids[0].hex, } expected_msg.update(self.expected) expected_msg.update(self.expected_ctxt) expected = { 'oslo.version': '2.0', 'oslo.message': expected_msg, } self.assertEqual(expected, received) TestRequestWireFormat.generate_scenarios() def _create_producer(target): connection = kombu.connection.BrokerConnection(transport='memory') # Kludge to speed up tests. connection.transport.polling_interval = 0.0 connection.connect() channel = connection.channel() # work around 'memory' transport bug in 1.1.3 channel._new_queue('ae.undeliver') if target.fanout: exchange = kombu.entity.Exchange(name=target.topic + '_fanout', type='fanout', durable=False, auto_delete=True) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=target.topic) elif target.server: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) topic = '%s.%s' % (target.topic, target.server) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=topic) else: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=target.topic) return connection, producer class TestReplyWireFormat(test_utils.BaseTestCase): _target = [ ('topic_target', dict(topic='testtopic', server=None, fanout=False)), ('server_target', dict(topic='testtopic', server='testserver', fanout=False)), # NOTE(markmc): https://github.com/celery/kombu/issues/195 ('fanout_target', dict(topic='testtopic', server=None, fanout=True, skip_msg='Requires kombu>2.5.12 to fix kombu issue #195')), ] _msg = [ ('empty_msg', dict(msg={}, expected={})), ('primitive_msg', dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), ('complex_msg', dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}}, expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), ] _context = [ ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), ('user_project_ctxt', dict(ctxt={'_context_user': 'mark', '_context_project': 'snarkybunch'}, expected_ctxt={'user': 'mark', 'project': 'snarkybunch'})), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._msg, cls._context, cls._target) def setUp(self): super(TestReplyWireFormat, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.messaging_conf.in_memory = True def test_reply_wire_format(self): if hasattr(self, 'skip_msg'): self.skipTest(self.skip_msg) transport = messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver target = messaging.Target(topic=self.topic, server=self.server, fanout=self.fanout) listener = driver.listen(target) connection, producer = _create_producer(target) self.addCleanup(connection.release) msg = { 'oslo.version': '2.0', 'oslo.message': {} } msg['oslo.message'].update(self.msg) msg['oslo.message'].update(self.ctxt) msg['oslo.message'].update({ '_msg_id': uuid.uuid4().hex, '_unique_id': uuid.uuid4().hex, 
'_reply_q': 'reply_' + uuid.uuid4().hex, }) msg['oslo.message'] = jsonutils.dumps(msg['oslo.message']) producer.publish(msg) received = listener.poll() self.assertIsNotNone(received) self.assertEqual(self.expected_ctxt, received.ctxt) self.assertEqual(self.expected, received.message) TestReplyWireFormat.generate_scenarios() class RpcKombuHATestCase(test_utils.BaseTestCase): def test_reconnect_order(self): brokers = ['host1', 'host2', 'host3', 'host4', 'host5'] brokers_count = len(brokers) self.conf.rabbit_hosts = brokers self.conf.rabbit_max_retries = 1 info = {'attempt': 0} def _connect(myself, params): # do as little work that is enough to pass connection attempt myself.connection = kombu.connection.BrokerConnection(**params) myself.connection_errors = myself.connection.connection_errors expected_broker = brokers[info['attempt'] % brokers_count] self.assertEqual(params['hostname'], expected_broker) info['attempt'] += 1 # just make sure connection instantiation does not fail with an # exception self.stubs.Set(rabbit_driver.Connection, '_connect', _connect) # starting from the first broker in the list connection = rabbit_driver.Connection(self.conf) # now that we have connection object, revert to the real 'connect' # implementation self.stubs.UnsetAll() for i in range(len(brokers)): self.assertRaises(driver_common.RPCException, connection.reconnect) connection.close() oslo.messaging-1.3.0/tests/test_notifier_logger.py0000664000175300017540000001303212316527457023544 0ustar jenkinsjenkins00000000000000# Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
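The tests below exercise messaging.LoggingNotificationHandler, which bridges the standard library logging module into the notification system: every LogRecord handed to emit() becomes a notification whose priority mirrors the record's level. A minimal, oslo-independent sketch of that idea (the in-memory NOTIFICATIONS sink is hypothetical, standing in for a real notification driver):

import logging

NOTIFICATIONS = []  # hypothetical sink; the real handler sends via a Notifier

class SketchNotificationHandler(logging.Handler):
    """Turn each LogRecord into a notification-style dict."""

    def emit(self, record):
        NOTIFICATIONS.append({
            'priority': record.levelname,      # e.g. 'INFO', 'ERROR'
            'event_type': 'logrecord',
            'payload': {'name': record.name,
                        'msg': record.getMessage(),
                        'lineno': record.lineno},
        })

logger = logging.getLogger('sketch')
logger.addHandler(SketchNotificationHandler())
logger.error('something happened')
assert NOTIFICATIONS[0]['priority'] == 'ERROR'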
import datetime import logging import logging.config import os import sys try: import threading except ImportError: threading = None import mock import testscenarios import testtools from oslo import messaging from oslo.messaging.openstack.common import timeutils from tests import test_notifier from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios # Stolen from openstack.common.logging logging.AUDIT = logging.INFO + 1 logging.addLevelName(logging.AUDIT, 'AUDIT') def get_thread_ident(): if threading is not None: return threading.current_thread().ident else: return None class TestLogNotifier(test_utils.BaseTestCase): scenarios = [ ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warning', dict(priority='warning', queue='WARN')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('critical', dict(priority='critical')), ('audit', dict(priority='audit')), ] def setUp(self): super(TestLogNotifier, self).setUp() self.addCleanup(messaging.notify._impl_test.reset) self.config(notification_driver=['test']) @mock.patch('oslo.messaging.openstack.common.timeutils.utcnow') def test_logger(self, mock_utcnow): with mock.patch('oslo.messaging.transport.get_transport', return_value=test_notifier._FakeTransport(self.conf)): self.logger = messaging.LoggingNotificationHandler('test://') mock_utcnow.return_value = datetime.datetime.utcnow() levelno = getattr(logging, self.priority.upper(), 42) record = logging.LogRecord('foo', levelno, '/foo/bar', 42, 'Something happened', None, None) self.logger.emit(record) n = messaging.notify._impl_test.NOTIFICATIONS[0][1] self.assertEqual(n['priority'], getattr(self, 'queue', self.priority.upper())) self.assertEqual(n['event_type'], 'logrecord') self.assertEqual(n['timestamp'], str(timeutils.utcnow())) self.assertEqual(n['publisher_id'], None) self.assertEqual( n['payload'], {'process': os.getpid(), 'funcName': None, 'name': 'foo', 'thread': get_thread_ident(), 'levelno': levelno, 'processName': 'MainProcess', 'pathname': '/foo/bar', 'lineno': 42, 'msg': 'Something happened', 'exc_info': None, 'levelname': logging.getLevelName(levelno), 'extra': None}) @testtools.skipUnless(hasattr(logging.config, 'dictConfig'), "Need logging.config.dictConfig (Python >= 2.7)") @mock.patch('oslo.messaging.openstack.common.timeutils.utcnow') def test_logging_conf(self, mock_utcnow): with mock.patch('oslo.messaging.transport.get_transport', return_value=test_notifier._FakeTransport(self.conf)): logging.config.dictConfig({ 'version': 1, 'handlers': { 'notification': { 'class': 'oslo.messaging.LoggingNotificationHandler', 'level': self.priority.upper(), 'url': 'test://', }, }, 'loggers': { 'default': { 'handlers': ['notification'], 'level': self.priority.upper(), }, }, }) mock_utcnow.return_value = datetime.datetime.utcnow() levelno = getattr(logging, self.priority.upper()) logger = logging.getLogger('default') lineno = sys._getframe().f_lineno + 1 logger.log(levelno, 'foobar') n = messaging.notify._impl_test.NOTIFICATIONS[0][1] self.assertEqual(n['priority'], getattr(self, 'queue', self.priority.upper())) self.assertEqual(n['event_type'], 'logrecord') self.assertEqual(n['timestamp'], str(timeutils.utcnow())) self.assertEqual(n['publisher_id'], None) pathname = __file__ if pathname.endswith(('.pyc', '.pyo')): pathname = pathname[:-1] self.assertDictEqual( n['payload'], {'process': os.getpid(), 'funcName': 'test_logging_conf', 'name': 'default', 'thread': get_thread_ident(), 'levelno': levelno, 'processName': 
'MainProcess', 'pathname': pathname, 'lineno': lineno, 'msg': 'foobar', 'exc_info': None, 'levelname': logging.getLevelName(levelno), 'extra': None}) oslo.messaging-1.3.0/tests/test_notify_listener.py0000664000175300017540000002371512316527457023614 0ustar jenkinsjenkins00000000000000 # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import mock from oslo.config import cfg import testscenarios from oslo import messaging from oslo.messaging.notify import dispatcher from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class ListenerSetupMixin(object): class Listener(object): def __init__(self, transport, targets, endpoints, expect_messages): self._expect_messages = expect_messages self._received_msgs = 0 self._listener = messaging.get_notification_listener( transport, targets, endpoints + [self], allow_requeue=True) def info(self, ctxt, publisher_id, event_type, payload): self._received_msgs += 1 if self._expect_messages == self._received_msgs: # Check start() does nothing with a running listener self._listener.start() self._listener.stop() self._listener.wait() def start(self): self._listener.start() def _setup_listener(self, transport, endpoints, expect_messages, targets=None): listener = self.Listener(transport, targets=targets or [ messaging.Target(topic='testtopic')], expect_messages=expect_messages, endpoints=endpoints) thread = threading.Thread(target=listener.start) thread.daemon = True thread.start() return thread def _stop_listener(self, thread): thread.join(timeout=5) def _setup_notifier(self, transport, topic='testtopic', publisher_id='testpublisher'): return messaging.Notifier(transport, topic=topic, driver='messaging', publisher_id=publisher_id) class TestNotifyListener(test_utils.BaseTestCase, ListenerSetupMixin): def __init__(self, *args): super(TestNotifyListener, self).__init__(*args) ListenerSetupMixin.__init__(self) def setUp(self): super(TestNotifyListener, self).setUp(conf=cfg.ConfigOpts()) def test_constructor(self): transport = messaging.get_transport(self.conf, url='fake:') target = messaging.Target(topic='foo') endpoints = [object()] listener = messaging.get_notification_listener(transport, [target], endpoints) self.assertIs(listener.conf, self.conf) self.assertIs(listener.transport, transport) self.assertIsInstance(listener.dispatcher, dispatcher.NotificationDispatcher) self.assertIs(listener.dispatcher.endpoints, endpoints) self.assertIs(listener.executor, 'blocking') def test_no_target_topic(self): transport = messaging.get_transport(self.conf, url='fake:') listener = messaging.get_notification_listener(transport, [messaging.Target()], [mock.Mock()]) try: listener.start() except Exception as ex: self.assertIsInstance(ex, messaging.InvalidTarget, ex) else: self.assertTrue(False) def test_unknown_executor(self): transport = messaging.get_transport(self.conf, url='fake:') try: messaging.get_notification_listener(transport, [], [], executor='foo') except Exception as ex: self.assertIsInstance(ex, 
messaging.ExecutorLoadFailure) self.assertEqual(ex.executor, 'foo') else: self.assertTrue(False) def test_one_topic(self): transport = messaging.get_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint], 1) notifier = self._setup_notifier(transport) notifier.info({}, 'an_event.start', 'test message') self._stop_listener(listener_thread) endpoint.info.assert_called_once_with( {}, 'testpublisher', 'an_event.start', 'test message', {'message_id': mock.ANY, 'timestamp': mock.ANY}) def test_two_topics(self): transport = messaging.get_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None targets = [messaging.Target(topic="topic1"), messaging.Target(topic="topic2")] listener_thread = self._setup_listener(transport, [endpoint], 2, targets=targets) notifier = self._setup_notifier(transport, topic='topic1') notifier.info({'ctxt': '1'}, 'an_event.start1', 'test') notifier = self._setup_notifier(transport, topic='topic2') notifier.info({'ctxt': '2'}, 'an_event.start2', 'test') self._stop_listener(listener_thread) expected = [mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start1', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start2', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY})] self.assertEqual(sorted(endpoint.info.call_args_list), expected) def test_two_exchanges(self): transport = messaging.get_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None targets = [messaging.Target(topic="topic", exchange="exchange1"), messaging.Target(topic="topic", exchange="exchange2")] listener_thread = self._setup_listener(transport, [endpoint], 3, targets=targets) notifier = self._setup_notifier(transport, topic="topic") def mock_notifier_exchange(name): def side_effect(target, ctxt, message, version): target.exchange = name return transport._driver.send_notification(target, ctxt, message, version) transport._send_notification = mock.MagicMock( side_effect=side_effect) notifier.info({'ctxt': '0'}, 'an_event.start', 'test message default exchange') mock_notifier_exchange('exchange1') notifier.info({'ctxt': '1'}, 'an_event.start', 'test message exchange1') mock_notifier_exchange('exchange2') notifier.info({'ctxt': '2'}, 'an_event.start', 'test message exchange2') self._stop_listener(listener_thread) expected = [mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start', 'test message exchange1', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start', 'test message exchange2', {'timestamp': mock.ANY, 'message_id': mock.ANY})] self.assertEqual(sorted(endpoint.info.call_args_list), expected) def test_two_endpoints(self): transport = messaging.get_transport(self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = messaging.NotificationResult.HANDLED listener_thread = self._setup_listener(transport, [endpoint1, endpoint2], 1) notifier = self._setup_notifier(transport) notifier.info({}, 'an_event.start', 'test') self._stop_listener(listener_thread) endpoint1.info.assert_called_once_with( {}, 'testpublisher', 'an_event.start', 'test', { 'timestamp': mock.ANY, 'message_id': mock.ANY}) endpoint2.info.assert_called_once_with( {}, 'testpublisher', 'an_event.start', 'test', { 'timestamp': mock.ANY, 'message_id': mock.ANY}) def test_requeue(self): 
transport = messaging.get_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info = mock.Mock() def side_effect_requeue(*args, **kwargs): if endpoint.info.call_count == 1: return messaging.NotificationResult.REQUEUE return messaging.NotificationResult.HANDLED endpoint.info.side_effect = side_effect_requeue listener_thread = self._setup_listener(transport, [endpoint], 2) notifier = self._setup_notifier(transport) notifier.info({}, 'an_event.start', 'test') self._stop_listener(listener_thread) expected = [mock.call({}, 'testpublisher', 'an_event.start', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call({}, 'testpublisher', 'an_event.start', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY})] self.assertEqual(endpoint.info.call_args_list, expected) oslo.messaging-1.3.0/tests/test_urls.py0000664000175300017540000002155612316527457021365 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testscenarios from oslo import messaging from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestParseURL(test_utils.BaseTestCase): scenarios = [ ('transport', dict(url='foo:', aliases=None, expect=dict(transport='foo'))), ('transport_aliased', dict(url='bar:', aliases=dict(bar='foo'), expect=dict(transport='foo'))), ('virtual_host_slash', dict(url='foo:////', aliases=None, expect=dict(transport='foo', virtual_host='/'))), ('virtual_host', dict(url='foo:///bar', aliases=None, expect=dict(transport='foo', virtual_host='bar'))), ('host', dict(url='foo://host/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host'), ]))), ('ipv6_host', dict(url='foo://[ffff::1]/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='ffff::1'), ]))), ('port', dict(url='foo://host:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host', port=1234), ]))), ('ipv6_port', dict(url='foo://[ffff::1]:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='ffff::1', port=1234), ]))), ('username', dict(url='foo://u@host:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host', port=1234, username='u'), ]))), ('password', dict(url='foo://u:p@host:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host', port=1234, username='u', password='p'), ]))), ('creds_no_host', dict(url='foo://u:p@/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(username='u', password='p'), ]))), ('multi_host', dict(url='foo://u:p@host1:1234,host2:4321/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host1', port=1234, username='u', password='p'), dict(host='host2', port=4321), ]))), ('multi_creds', dict(url='foo://u1:p1@host1:1234,u2:p2@host2:4321/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ 
dict(host='host1', port=1234, username='u1', password='p1'), dict(host='host2', port=4321, username='u2', password='p2'), ]))), ('multi_creds_ipv6', dict(url='foo://u1:p1@[ffff::1]:1234,u2:p2@[ffff::2]:4321/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='ffff::1', port=1234, username='u1', password='p1'), dict(host='ffff::2', port=4321, username='u2', password='p2'), ]))), ] def test_parse_url(self): self.config(rpc_backend=None) url = messaging.TransportURL.parse(self.conf, self.url, self.aliases) hosts = [] for host in self.expect.get('hosts', []): hosts.append(messaging.TransportHost(host.get('host'), host.get('port'), host.get('username'), host.get('password'))) expected = messaging.TransportURL(self.conf, self.expect.get('transport'), self.expect.get('virtual_host'), hosts) self.assertEqual(url, expected) class TestFormatURL(test_utils.BaseTestCase): scenarios = [ ('rpc_backend', dict(rpc_backend='testbackend', transport=None, virtual_host=None, hosts=[], aliases=None, expected='testbackend:///')), ('rpc_backend_aliased', dict(rpc_backend='testfoo', transport=None, virtual_host=None, hosts=[], aliases=dict(testfoo='testbackend'), expected='testbackend:///')), ('transport', dict(rpc_backend=None, transport='testtransport', virtual_host=None, hosts=[], aliases=None, expected='testtransport:///')), ('transport_aliased', dict(rpc_backend=None, transport='testfoo', virtual_host=None, hosts=[], aliases=dict(testfoo='testtransport'), expected='testtransport:///')), ('virtual_host', dict(rpc_backend=None, transport='testtransport', virtual_host='/vhost', hosts=[], aliases=None, expected='testtransport:////vhost')), ('host', dict(rpc_backend=None, transport='testtransport', virtual_host='/', hosts=[ dict(hostname='host', port=10, username='bob', password='secret'), ], aliases=None, expected='testtransport://bob:secret@host:10//')), ('multi_host', dict(rpc_backend=None, transport='testtransport', virtual_host='', hosts=[ dict(hostname='h1', port=1000, username='b1', password='s1'), dict(hostname='h2', port=2000, username='b2', password='s2'), ], aliases=None, expected='testtransport://b1:s1@h1:1000,b2:s2@h2:2000/')), ('quoting', dict(rpc_backend=None, transport='testtransport', virtual_host='/$', hosts=[ dict(hostname='host', port=10, username='b$', password='s&'), ], aliases=None, expected='testtransport://b%24:s%26@host:10//%24')), ] def test_parse_url(self): self.config(rpc_backend=self.rpc_backend) hosts = [] for host in self.hosts: hosts.append(messaging.TransportHost(host.get('hostname'), host.get('port'), host.get('username'), host.get('password'))) url = messaging.TransportURL(self.conf, self.transport, self.virtual_host, hosts, self.aliases) self.assertEqual(str(url), self.expected) oslo.messaging-1.3.0/tests/utils.py0000664000175300017540000000374412316527457020500 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" from oslo.config import cfg import six from oslotest import base from oslotest import moxstubout TRUE_VALUES = ('true', '1', 'yes') class BaseTestCase(base.BaseTestCase): def setUp(self, conf=cfg.CONF): super(BaseTestCase, self).setUp() from oslo.messaging import conffixture self.messaging_conf = self.useFixture(conffixture.ConfFixture(conf)) self.conf = self.messaging_conf.conf moxfixture = self.useFixture(moxstubout.MoxStubout()) self.mox = moxfixture.mox self.stubs = moxfixture.stubs def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the tearDown() method. """ group = kw.pop('group', None) for k, v in six.iteritems(kw): self.conf.set_override(k, v, group) oslo.messaging-1.3.0/tests/test_rpc_client.py0000664000175300017540000003677412316527457022532 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
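The RPC client tests that follow lean on the config() helper defined in tests/utils.py above, which is a thin wrapper over oslo.config's override mechanism. A self-contained illustration of that mechanism (the option name and default here are chosen just for the example):

from oslo.config import cfg

conf = cfg.ConfigOpts()
conf.register_opt(cfg.IntOpt('rpc_response_timeout', default=60))

conf.set_override('rpc_response_timeout', 5)
assert conf.rpc_response_timeout == 5

conf.clear_override('rpc_response_timeout')  # what tearDown effectively does
assert conf.rpc_response_timeout == 60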
from oslo.config import cfg import testscenarios from oslo import messaging from oslo.messaging import serializer as msg_serializer from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class _FakeTransport(object): def __init__(self, conf): self.conf = conf def _send(self, *args, **kwargs): pass class TestCastCall(test_utils.BaseTestCase): scenarios = [ ('cast_no_ctxt_no_args', dict(call=False, ctxt={}, args={})), ('call_no_ctxt_no_args', dict(call=True, ctxt={}, args={})), ('cast_ctxt_and_args', dict(call=False, ctxt=dict(user='testuser', project='testtenant'), args=dict(bar='blaa', foobar=11.01))), ('call_ctxt_and_args', dict(call=True, ctxt=dict(user='testuser', project='testtenant'), args=dict(bar='blaa', foobar=11.01))), ] def test_cast_call(self): self.config(rpc_response_timeout=None) transport = _FakeTransport(self.conf) client = messaging.RPCClient(transport, messaging.Target()) self.mox.StubOutWithMock(transport, '_send') msg = dict(method='foo', args=self.args) kwargs = {} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None transport._send(messaging.Target(), self.ctxt, msg, **kwargs) self.mox.ReplayAll() method = client.call if self.call else client.cast method(self.ctxt, 'foo', **self.args) class TestCastToTarget(test_utils.BaseTestCase): _base = [ ('all_none', dict(ctor={}, prepare={}, expect={})), ('ctor_exchange', dict(ctor=dict(exchange='testexchange'), prepare={}, expect=dict(exchange='testexchange'))), ('prepare_exchange', dict(ctor={}, prepare=dict(exchange='testexchange'), expect=dict(exchange='testexchange'))), ('prepare_exchange_none', dict(ctor=dict(exchange='testexchange'), prepare=dict(exchange=None), expect={})), ('both_exchange', dict(ctor=dict(exchange='ctorexchange'), prepare=dict(exchange='testexchange'), expect=dict(exchange='testexchange'))), ('ctor_topic', dict(ctor=dict(topic='testtopic'), prepare={}, expect=dict(topic='testtopic'))), ('prepare_topic', dict(ctor={}, prepare=dict(topic='testtopic'), expect=dict(topic='testtopic'))), ('prepare_topic_none', dict(ctor=dict(topic='testtopic'), prepare=dict(topic=None), expect={})), ('both_topic', dict(ctor=dict(topic='ctortopic'), prepare=dict(topic='testtopic'), expect=dict(topic='testtopic'))), ('ctor_namespace', dict(ctor=dict(namespace='testnamespace'), prepare={}, expect=dict(namespace='testnamespace'))), ('prepare_namespace', dict(ctor={}, prepare=dict(namespace='testnamespace'), expect=dict(namespace='testnamespace'))), ('prepare_namespace_none', dict(ctor=dict(namespace='testnamespace'), prepare=dict(namespace=None), expect={})), ('both_namespace', dict(ctor=dict(namespace='ctornamespace'), prepare=dict(namespace='testnamespace'), expect=dict(namespace='testnamespace'))), ('ctor_version', dict(ctor=dict(version='testversion'), prepare={}, expect=dict(version='testversion'))), ('prepare_version', dict(ctor={}, prepare=dict(version='testversion'), expect=dict(version='testversion'))), ('prepare_version_none', dict(ctor=dict(version='testversion'), prepare=dict(version=None), expect={})), ('both_version', dict(ctor=dict(version='ctorversion'), prepare=dict(version='testversion'), expect=dict(version='testversion'))), ('ctor_server', dict(ctor=dict(server='testserver'), prepare={}, expect=dict(server='testserver'))), ('prepare_server', dict(ctor={}, prepare=dict(server='testserver'), expect=dict(server='testserver'))), ('prepare_server_none', dict(ctor=dict(server='testserver'), prepare=dict(server=None), expect={})), ('both_server', 
dict(ctor=dict(server='ctorserver'), prepare=dict(server='testserver'), expect=dict(server='testserver'))), ('ctor_fanout', dict(ctor=dict(fanout=True), prepare={}, expect=dict(fanout=True))), ('prepare_fanout', dict(ctor={}, prepare=dict(fanout=True), expect=dict(fanout=True))), ('prepare_fanout_none', dict(ctor=dict(fanout=True), prepare=dict(fanout=None), expect={})), ('both_fanout', dict(ctor=dict(fanout=True), prepare=dict(fanout=False), expect=dict(fanout=False))), ] _prepare = [ ('single_prepare', dict(double_prepare=False)), ('double_prepare', dict(double_prepare=True)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._base, cls._prepare) def setUp(self): super(TestCastToTarget, self).setUp(conf=cfg.ConfigOpts()) def test_cast_to_target(self): target = messaging.Target(**self.ctor) expect_target = messaging.Target(**self.expect) transport = _FakeTransport(self.conf) client = messaging.RPCClient(transport, target) self.mox.StubOutWithMock(transport, '_send') msg = dict(method='foo', args={}) if 'namespace' in self.expect: msg['namespace'] = self.expect['namespace'] if 'version' in self.expect: msg['version'] = self.expect['version'] transport._send(expect_target, {}, msg) self.mox.ReplayAll() if self.prepare: client = client.prepare(**self.prepare) if self.double_prepare: client = client.prepare(**self.prepare) client.cast({}, 'foo') TestCastToTarget.generate_scenarios() _notset = object() class TestCallTimeout(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(confval=None, ctor=None, prepare=_notset, expect=None)), ('confval', dict(confval=21.1, ctor=None, prepare=_notset, expect=21.1)), ('ctor', dict(confval=None, ctor=21.1, prepare=_notset, expect=21.1)), ('ctor_zero', dict(confval=None, ctor=0, prepare=_notset, expect=0)), ('prepare', dict(confval=None, ctor=None, prepare=21.1, expect=21.1)), ('prepare_override', dict(confval=None, ctor=10.1, prepare=21.1, expect=21.1)), ('prepare_zero', dict(confval=None, ctor=None, prepare=0, expect=0)), ] def test_call_timeout(self): self.config(rpc_response_timeout=self.confval) transport = _FakeTransport(self.conf) client = messaging.RPCClient(transport, messaging.Target(), timeout=self.ctor) self.mox.StubOutWithMock(transport, '_send') msg = dict(method='foo', args={}) kwargs = dict(wait_for_reply=True, timeout=self.expect) transport._send(messaging.Target(), {}, msg, **kwargs) self.mox.ReplayAll() if self.prepare is not _notset: client = client.prepare(timeout=self.prepare) client.call({}, 'foo') class TestSerializer(test_utils.BaseTestCase): scenarios = [ ('cast', dict(call=False, ctxt=dict(user='bob'), args=dict(a='a', b='b', c='c'), retval=None)), ('call', dict(call=True, ctxt=dict(user='bob'), args=dict(a='a', b='b', c='c'), retval='d')), ] def test_call_serializer(self): self.config(rpc_response_timeout=None) transport = _FakeTransport(self.conf) serializer = msg_serializer.NoOpSerializer() client = messaging.RPCClient(transport, messaging.Target(), serializer=serializer) self.mox.StubOutWithMock(transport, '_send') msg = dict(method='foo', args=dict([(k, 's' + v) for k, v in self.args.items()])) kwargs = dict(wait_for_reply=True, timeout=None) if self.call else {} transport._send(messaging.Target(), dict(user='alice'), msg, **kwargs).AndReturn(self.retval) self.mox.StubOutWithMock(serializer, 'serialize_entity') self.mox.StubOutWithMock(serializer, 'deserialize_entity') self.mox.StubOutWithMock(serializer, 'serialize_context') for arg in self.args: 
serializer.serialize_entity(self.ctxt, arg).AndReturn('s' + arg) if self.call: serializer.deserialize_entity(self.ctxt, self.retval).\ AndReturn('d' + self.retval) serializer.serialize_context(self.ctxt).AndReturn(dict(user='alice')) self.mox.ReplayAll() method = client.call if self.call else client.cast retval = method(self.ctxt, 'foo', **self.args) if self.retval is not None: self.assertEqual(retval, 'd' + self.retval) class TestVersionCap(test_utils.BaseTestCase): _call_vs_cast = [ ('call', dict(call=True)), ('cast', dict(call=False)), ] _cap_scenarios = [ ('all_none', dict(cap=None, prepare_cap=_notset, version=None, prepare_version=_notset, success=True)), ('ctor_cap_ok', dict(cap='1.1', prepare_cap=_notset, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_override_ok', dict(cap='2.0', prepare_cap='1.1', version='1.0', prepare_version='1.0', success=True)), ('ctor_cap_override_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_minor_fail', dict(cap='1.0', prepare_cap=_notset, version='1.1', prepare_version=_notset, success=False)), ('ctor_cap_major_fail', dict(cap='2.0', prepare_cap=_notset, version=None, prepare_version='1.0', success=False)), ] @classmethod def generate_scenarios(cls): cls.scenarios = ( testscenarios.multiply_scenarios(cls._call_vs_cast, cls._cap_scenarios)) def test_version_cap(self): self.config(rpc_response_timeout=None) transport = _FakeTransport(self.conf) target = messaging.Target(version=self.version) client = messaging.RPCClient(transport, target, version_cap=self.cap) if self.success: self.mox.StubOutWithMock(transport, '_send') if self.prepare_version is not _notset: target = target(version=self.prepare_version) msg = dict(method='foo', args={}) if target.version is not None: msg['version'] = target.version kwargs = {} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None transport._send(target, {}, msg, **kwargs) self.mox.ReplayAll() prep_kwargs = {} if self.prepare_cap is not _notset: prep_kwargs['version_cap'] = self.prepare_cap if self.prepare_version is not _notset: prep_kwargs['version'] = self.prepare_version if prep_kwargs: client = client.prepare(**prep_kwargs) method = client.call if self.call else client.cast try: method({}, 'foo') except Exception as ex: self.assertIsInstance(ex, messaging.RPCVersionCapError, ex) self.assertFalse(self.success) else: self.assertTrue(self.success) TestVersionCap.generate_scenarios() class TestCanSendVersion(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(cap=None, prepare_cap=_notset, version=None, prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_ok', dict(cap='1.1', prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_override_ok', dict(cap='2.0', prepare_cap='1.1', version='1.0', prepare_version='1.0', can_send_version=_notset, can_send=True)), ('ctor_cap_override_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_can_send_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version='1.1', can_send=True)), ('ctor_cap_can_send_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version=None, can_send=True)), ('ctor_cap_minor_fail', dict(cap='1.0', prepare_cap=_notset, version='1.1', prepare_version=_notset, can_send_version=_notset, can_send=False)), 
('ctor_cap_major_fail', dict(cap='2.0', prepare_cap=_notset, version=None, prepare_version='1.0', can_send_version=_notset, can_send=False)), ] def test_version_cap(self): self.config(rpc_response_timeout=None) transport = _FakeTransport(self.conf) target = messaging.Target(version=self.version) client = messaging.RPCClient(transport, target, version_cap=self.cap) prep_kwargs = {} if self.prepare_cap is not _notset: prep_kwargs['version_cap'] = self.prepare_cap if self.prepare_version is not _notset: prep_kwargs['version'] = self.prepare_version if prep_kwargs: client = client.prepare(**prep_kwargs) if self.can_send_version is not _notset: can_send = client.can_send_version(version=self.can_send_version) else: can_send = client.can_send_version() self.assertEqual(can_send, self.can_send) oslo.messaging-1.3.0/tests/test_notifier.py0000664000175300017540000003677712316527457022232 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import logging import sys import uuid import fixtures import mock from stevedore import extension import testscenarios import yaml from oslo import messaging from oslo.messaging.notify import _impl_log from oslo.messaging.notify import _impl_messaging from oslo.messaging.notify import _impl_routing as routing from oslo.messaging.notify import _impl_test from oslo.messaging.notify import notifier as msg_notifier from oslo.messaging.openstack.common import jsonutils from oslo.messaging.openstack.common import timeutils from oslo.messaging import serializer as msg_serializer from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class _FakeTransport(object): def __init__(self, conf): self.conf = conf def _send_notification(self, target, ctxt, message, version): pass class _ReRaiseLoggedExceptionsFixture(fixtures.Fixture): """Record logged exceptions and re-raise in cleanup. The notifier just logs notification send errors so, for the sake of debugging test failures, we record any exceptions logged and re-raise them during cleanup. 
""" class FakeLogger(object): def __init__(self): self.exceptions = [] def exception(self, msg, *args, **kwargs): self.exceptions.append(sys.exc_info()[1]) def setUp(self): super(_ReRaiseLoggedExceptionsFixture, self).setUp() self.logger = self.FakeLogger() def reraise_exceptions(): for ex in self.logger.exceptions: raise ex self.addCleanup(reraise_exceptions) class TestMessagingNotifier(test_utils.BaseTestCase): _v1 = [ ('v1', dict(v1=True)), ('not_v1', dict(v1=False)), ] _v2 = [ ('v2', dict(v2=True)), ('not_v2', dict(v2=False)), ] _publisher_id = [ ('ctor_pub_id', dict(ctor_pub_id='test', expected_pub_id='test')), ('prep_pub_id', dict(prep_pub_id='test.localhost', expected_pub_id='test.localhost')), ('override', dict(ctor_pub_id='test', prep_pub_id='test.localhost', expected_pub_id='test.localhost')), ] _topics = [ ('no_topics', dict(topics=[])), ('single_topic', dict(topics=['notifications'])), ('multiple_topic2', dict(topics=['foo', 'bar'])), ] _priority = [ ('audit', dict(priority='audit')), ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('sample', dict(priority='sample')), ('critical', dict(priority='critical')), ] _payload = [ ('payload', dict(payload={'foo': 'bar'})), ] _context = [ ('ctxt', dict(ctxt={'user': 'bob'})), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._v1, cls._v2, cls._publisher_id, cls._topics, cls._priority, cls._payload, cls._context) def setUp(self): super(TestMessagingNotifier, self).setUp() self.logger = self.useFixture(_ReRaiseLoggedExceptionsFixture()).logger self.stubs.Set(_impl_messaging, 'LOG', self.logger) self.stubs.Set(msg_notifier, '_LOG', self.logger) @mock.patch('oslo.messaging.openstack.common.timeutils.utcnow') def test_notifier(self, mock_utcnow): drivers = [] if self.v1: drivers.append('messaging') if self.v2: drivers.append('messagingv2') self.config(notification_driver=drivers, notification_topics=self.topics) transport = _FakeTransport(self.conf) if hasattr(self, 'ctor_pub_id'): notifier = messaging.Notifier(transport, publisher_id=self.ctor_pub_id) else: notifier = messaging.Notifier(transport) if hasattr(self, 'prep_pub_id'): notifier = notifier.prepare(publisher_id=self.prep_pub_id) self.mox.StubOutWithMock(transport, '_send_notification') message_id = uuid.uuid4() self.mox.StubOutWithMock(uuid, 'uuid4') uuid.uuid4().AndReturn(message_id) mock_utcnow.return_value = datetime.datetime.utcnow() message = { 'message_id': str(message_id), 'publisher_id': self.expected_pub_id, 'event_type': 'test.notify', 'priority': self.priority.upper(), 'payload': self.payload, 'timestamp': str(timeutils.utcnow()), } sends = [] if self.v1: sends.append(dict(version=1.0)) if self.v2: sends.append(dict(version=2.0)) for send_kwargs in sends: for topic in self.topics: target = messaging.Target(topic='%s.%s' % (topic, self.priority)) transport._send_notification(target, self.ctxt, message, **send_kwargs).InAnyOrder() self.mox.ReplayAll() method = getattr(notifier, self.priority) method(self.ctxt, 'test.notify', self.payload) TestMessagingNotifier.generate_scenarios() class TestSerializer(test_utils.BaseTestCase): def setUp(self): super(TestSerializer, self).setUp() self.addCleanup(_impl_test.reset) @mock.patch('oslo.messaging.openstack.common.timeutils.utcnow') def test_serializer(self, mock_utcnow): transport = _FakeTransport(self.conf) serializer = msg_serializer.NoOpSerializer() notifier = messaging.Notifier(transport, 
'test.localhost', driver='test', topic='test', serializer=serializer) message_id = uuid.uuid4() self.mox.StubOutWithMock(uuid, 'uuid4') uuid.uuid4().AndReturn(message_id) mock_utcnow.return_value = datetime.datetime.utcnow() self.mox.StubOutWithMock(serializer, 'serialize_context') self.mox.StubOutWithMock(serializer, 'serialize_entity') serializer.serialize_context(dict(user='bob')).\ AndReturn(dict(user='alice')) serializer.serialize_entity(dict(user='bob'), 'bar').AndReturn('sbar') self.mox.ReplayAll() notifier.info(dict(user='bob'), 'test.notify', 'bar') message = { 'message_id': str(message_id), 'publisher_id': 'test.localhost', 'event_type': 'test.notify', 'priority': 'INFO', 'payload': 'sbar', 'timestamp': str(timeutils.utcnow()), } self.assertEqual(_impl_test.NOTIFICATIONS, [(dict(user='alice'), message, 'INFO')]) class TestLogNotifier(test_utils.BaseTestCase): @mock.patch('oslo.messaging.openstack.common.timeutils.utcnow') def test_notifier(self, mock_utcnow): self.config(notification_driver=['log']) transport = _FakeTransport(self.conf) notifier = messaging.Notifier(transport, 'test.localhost') message_id = uuid.uuid4() self.mox.StubOutWithMock(uuid, 'uuid4') uuid.uuid4().AndReturn(message_id) mock_utcnow.return_value = datetime.datetime.utcnow() message = { 'message_id': str(message_id), 'publisher_id': 'test.localhost', 'event_type': 'test.notify', 'priority': 'INFO', 'payload': 'bar', 'timestamp': str(timeutils.utcnow()), } logger = self.mox.CreateMockAnything() self.mox.StubOutWithMock(logging, 'getLogger') logging.getLogger('oslo.messaging.notification.test.notify').\ AndReturn(logger) logger.info(jsonutils.dumps(message)) self.mox.ReplayAll() notifier.info({}, 'test.notify', 'bar') def test_sample_priority(self): # Ensure logger drops sample-level notifications. driver = _impl_log.LogDriver(None, None, None) logger = self.mox.CreateMock( logging.getLogger('oslo.messaging.notification.foo')) logger.sample = None self.mox.StubOutWithMock(logging, 'getLogger') logging.getLogger('oslo.messaging.notification.foo').\ AndReturn(logger) self.mox.ReplayAll() msg = {'event_type': 'foo'} driver.notify(None, msg, "sample") class TestRoutingNotifier(test_utils.BaseTestCase): def setUp(self): super(TestRoutingNotifier, self).setUp() self.router = routing.RoutingDriver(None, None, None) def _fake_extension_manager(self, ext): return extension.ExtensionManager.make_test_instance( [extension.Extension('test', None, None, ext), ]) def _empty_extension_manager(self): return extension.ExtensionManager.make_test_instance([]) def test_should_load_plugin(self): self.router.used_drivers = set(["zoo", "blah"]) ext = mock.MagicMock() ext.name = "foo" self.assertFalse(self.router._should_load_plugin(ext)) ext.name = "zoo" self.assertTrue(self.router._should_load_plugin(ext)) def test_load_notifiers_no_config(self): # default routing_notifier_config="" self.router._load_notifiers() self.assertEqual(self.router.routing_groups, {}) self.assertEqual(0, len(self.router.used_drivers)) def test_load_notifiers_no_extensions(self): self.config(routing_notifier_config="routing_notifier.yaml") routing_config = r"" config_file = mock.MagicMock() config_file.return_value = routing_config with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=self._empty_extension_manager()): with mock.patch('oslo.messaging.notify.' 
'_impl_routing.LOG') as mylog: self.router._load_notifiers() self.assertFalse(mylog.debug.called) self.assertEqual(self.router.routing_groups, {}) def test_load_notifiers_config(self): self.config(routing_notifier_config="routing_notifier.yaml") routing_config = r""" group_1: rpc : foo group_2: rpc : blah """ config_file = mock.MagicMock() config_file.return_value = routing_config with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=self._fake_extension_manager( mock.MagicMock())): self.router._load_notifiers() groups = self.router.routing_groups.keys() groups.sort() self.assertEqual(['group_1', 'group_2'], groups) def test_get_drivers_for_message_accepted_events(self): config = r""" group_1: rpc: accepted_events: - foo.* - blah.zoo.* - zip """ groups = yaml.load(config) group = groups['group_1'] # No matching event ... self.assertEqual([], self.router._get_drivers_for_message( group, "unknown", None)) # Child of foo ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, "foo.1", None)) # Foo itself ... self.assertEqual([], self.router._get_drivers_for_message( group, "foo", None)) # Child of blah.zoo self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, "blah.zoo.zing", None)) def test_get_drivers_for_message_accepted_priorities(self): config = r""" group_1: rpc: accepted_priorities: - info - error """ groups = yaml.load(config) group = groups['group_1'] # No matching priority self.assertEqual([], self.router._get_drivers_for_message( group, None, "unknown")) # Info ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, None, "info")) # Error (to make sure the list is getting processed) ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, None, "error")) def test_get_drivers_for_message_both(self): config = r""" group_1: rpc: accepted_priorities: - info accepted_events: - foo.* driver_1: accepted_priorities: - info driver_2: accepted_events: - foo.* """ groups = yaml.load(config) group = groups['group_1'] # Valid event, but no matching priority self.assertEqual(['driver_2'], self.router._get_drivers_for_message( group, 'foo.blah', "unknown")) # Valid priority, but no matching event self.assertEqual(['driver_1'], self.router._get_drivers_for_message( group, 'unknown', "info")) # Happy day ... x = self.router._get_drivers_for_message(group, 'foo.blah', "info") x.sort() self.assertEqual(['driver_1', 'driver_2', 'rpc'], x) def test_filter_func(self): ext = mock.MagicMock() ext.name = "rpc" # Good ... self.assertTrue(self.router._filter_func(ext, {}, {}, ['foo', 'rpc'])) # Bad self.assertFalse(self.router._filter_func(ext, {}, {}, ['foo'])) def test_notify(self): self.router.routing_groups = {'group_1': None, 'group_2': None} message = {'event_type': 'my_event', 'priority': 'my_priority'} drivers_mock = mock.MagicMock() drivers_mock.side_effect = [['rpc'], ['foo']] with mock.patch.object(self.router, 'plugin_manager') as pm: with mock.patch.object(self.router, '_get_drivers_for_message', drivers_mock): self.router.notify({}, message) self.assertEqual(pm.map.call_args[0][4], ['rpc', 'foo']) oslo.messaging-1.3.0/tests/test_qpid.py0000664000175300017540000005064512316527457021336 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import thread import threading import time import mock import qpid import testscenarios from oslo import messaging from oslo.messaging._drivers import impl_qpid as qpid_driver from tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios QPID_BROKER = 'localhost:5672' class TestQpidDriverLoad(test_utils.BaseTestCase): def setUp(self): super(TestQpidDriverLoad, self).setUp() self.messaging_conf.transport_driver = 'qpid' def test_driver_load(self): transport = messaging.get_transport(self.conf) self.assertIsInstance(transport._driver, qpid_driver.QpidDriver) def _is_qpidd_service_running(): """this function checks if the qpid service is running or not.""" qpid_running = True try: broker = QPID_BROKER connection = qpid.messaging.Connection(broker) connection.open() except Exception: # qpid service is not running. qpid_running = False else: connection.close() return qpid_running class _QpidBaseTestCase(test_utils.BaseTestCase): def setUp(self): super(_QpidBaseTestCase, self).setUp() self.messaging_conf.transport_driver = 'qpid' self.fake_qpid = not _is_qpidd_service_running() if self.fake_qpid: self.session_receive = get_fake_qpid_session() self.session_send = get_fake_qpid_session() else: self.broker = QPID_BROKER # create connection from the qpid.messaging # connection for the Consumer. self.con_receive = qpid.messaging.Connection(self.broker) self.con_receive.open() # session to receive the messages self.session_receive = self.con_receive.session() # connection for sending the message self.con_send = qpid.messaging.Connection(self.broker) self.con_send.open() # session to send the messages self.session_send = self.con_send.session() # list to store the expected messages and # the actual received messages self._expected = [] self._messages = [] self.initialized = True def tearDown(self): super(_QpidBaseTestCase, self).tearDown() if self.initialized: if self.fake_qpid: _fake_session.flush_exchanges() else: self.con_receive.close() self.con_send.close() class TestQpidInvalidTopologyVersion(_QpidBaseTestCase): """Unit test cases to test invalid qpid topology version.""" scenarios = [ ('direct', dict(consumer_cls=qpid_driver.DirectConsumer, publisher_cls=qpid_driver.DirectPublisher)), ('topic', dict(consumer_cls=qpid_driver.TopicConsumer, publisher_cls=qpid_driver.TopicPublisher)), ('fanout', dict(consumer_cls=qpid_driver.FanoutConsumer, publisher_cls=qpid_driver.FanoutPublisher)), ] def setUp(self): super(TestQpidInvalidTopologyVersion, self).setUp() self.config(qpid_topology_version=-1) def test_invalid_topology_version(self): def consumer_callback(msg): pass msgid_or_topic = 'test' # not using self.assertRaises because # 1. qpid driver raises Exception(msg) for invalid topology version # 2. 
flake8 - H202 assertRaises Exception too broad exception_msg = ("Invalid value for qpid_topology_version: %d" % self.conf.qpid_topology_version) recvd_exc_msg = '' try: self.consumer_cls(self.conf, self.session_receive, msgid_or_topic, consumer_callback) except Exception as e: recvd_exc_msg = e.message self.assertEqual(exception_msg, recvd_exc_msg) recvd_exc_msg = '' try: self.publisher_cls(self.conf, self.session_send, msgid_or_topic) except Exception as e: recvd_exc_msg = e.message self.assertEqual(exception_msg, recvd_exc_msg) class TestQpidDirectConsumerPublisher(_QpidBaseTestCase): """Unit test cases to test DirectConsumer and Direct Publisher.""" _n_qpid_topology = [ ('v1', dict(qpid_topology=1)), ('v2', dict(qpid_topology=2)), ] _n_msgs = [ ('single', dict(no_msgs=1)), ('multiple', dict(no_msgs=10)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._n_qpid_topology, cls._n_msgs) def consumer_callback(self, msg): # This function will be called by the DirectConsumer # when any message is received. # Append the received message into the messages list # so that the received messages can be validated # with the expected messages if isinstance(msg, dict): self._messages.append(msg['content']) else: self._messages.append(msg) def test_qpid_direct_consumer_producer(self): self.msgid = str(random.randint(1, 100)) # create a DirectConsumer and DirectPublisher class objects self.dir_cons = qpid_driver.DirectConsumer(self.conf, self.session_receive, self.msgid, self.consumer_callback) self.dir_pub = qpid_driver.DirectPublisher(self.conf, self.session_send, self.msgid) def try_send_msg(no_msgs): for i in range(no_msgs): self._expected.append(str(i)) snd_msg = {'content_type': 'text/plain', 'content': str(i)} self.dir_pub.send(snd_msg) def try_receive_msg(no_msgs): for i in range(no_msgs): self.dir_cons.consume() thread1 = threading.Thread(target=try_receive_msg, args=(self.no_msgs,)) thread2 = threading.Thread(target=try_send_msg, args=(self.no_msgs,)) thread1.start() thread2.start() thread1.join() thread2.join() self.assertEqual(len(self._messages), self.no_msgs) self.assertEqual(self._messages, self._expected) TestQpidDirectConsumerPublisher.generate_scenarios() class TestQpidTopicAndFanout(_QpidBaseTestCase): """Unit Test cases to test TopicConsumer and TopicPublisher classes of the qpid driver and FanoutConsumer and FanoutPublisher classes of the qpid driver """ _n_qpid_topology = [ ('v1', dict(qpid_topology=1)), ('v2', dict(qpid_topology=2)), ] _n_msgs = [ ('single', dict(no_msgs=1)), ('multiple', dict(no_msgs=10)), ] _n_senders = [ ('single', dict(no_senders=1)), ('multiple', dict(no_senders=10)), ] _n_receivers = [ ('single', dict(no_receivers=1)), ] _exchange_class = [ ('topic', dict(consumer_cls=qpid_driver.TopicConsumer, publisher_cls=qpid_driver.TopicPublisher, topic='topictest.test', receive_topic='topictest.test')), ('fanout', dict(consumer_cls=qpid_driver.FanoutConsumer, publisher_cls=qpid_driver.FanoutPublisher, topic='fanouttest', receive_topic='fanouttest')), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._n_qpid_topology, cls._n_msgs, cls._n_senders, cls._n_receivers, cls._exchange_class) def setUp(self): super(TestQpidTopicAndFanout, self).setUp() # to store the expected messages and the # actual received messages # # NOTE(dhellmann): These are dicts, where the base class uses # lists. 
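# self._expected maps sender id -> the messages produced by that sender;
# self._messages maps receiver thread ident -> {sender id -> messages seen
# by that receiver}, so each receiver's view can be compared against
# self._expected at the end of the test.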
self._expected = {} self._messages = {} self._senders = [] self._receivers = [] self._sender_threads = [] self._receiver_threads = [] def consumer_callback(self, msg): """callback function called by the ConsumerBase class of qpid driver. Message will be received in the format x-y where x is the sender id and y is the msg number of the sender extract the sender id 'x' and store the msg 'x-y' with 'x' as the key """ if isinstance(msg, dict): msgcontent = msg['content'] else: msgcontent = msg splitmsg = msgcontent.split('-') key = thread.get_ident() if key not in self._messages: self._messages[key] = dict() tdict = self._messages[key] if splitmsg[0] not in tdict: tdict[splitmsg[0]] = [] tdict[splitmsg[0]].append(msgcontent) def _try_send_msg(self, sender_id, no_msgs): for i in range(no_msgs): sendmsg = '%s-%s' % (str(sender_id), str(i)) key = str(sender_id) # Store the message in the self._expected for each sender. # This will be used later to # validate the test by comparing it with the # received messages by all the receivers if key not in self._expected: self._expected[key] = [] self._expected[key].append(sendmsg) send_dict = {'content_type': 'text/plain', 'content': sendmsg} self._senders[sender_id].send(send_dict) def _try_receive_msg(self, receiver_id, no_msgs): for i in range(self.no_senders * no_msgs): no_of_attempts = 0 # ConsumerBase.consume blocks indefinitely until a message # is received. # So qpid_receiver.available() is called before calling # ConsumerBase.consume() so that we are not # blocked indefinitely qpid_receiver = self._receivers[receiver_id].get_receiver() while no_of_attempts < 50: if qpid_receiver.available() > 0: self._receivers[receiver_id].consume() break no_of_attempts += 1 time.sleep(0.05) def test_qpid_topic_and_fanout(self): for receiver_id in range(self.no_receivers): consumer = self.consumer_cls(self.conf, self.session_receive, self.receive_topic, self.consumer_callback) self._receivers.append(consumer) # create receivers threads thread = threading.Thread(target=self._try_receive_msg, args=(receiver_id, self.no_msgs,)) self._receiver_threads.append(thread) for sender_id in range(self.no_senders): publisher = self.publisher_cls(self.conf, self.session_send, self.topic) self._senders.append(publisher) # create sender threads thread = threading.Thread(target=self._try_send_msg, args=(sender_id, self.no_msgs,)) self._sender_threads.append(thread) for thread in self._receiver_threads: thread.start() for thread in self._sender_threads: thread.start() for thread in self._receiver_threads: thread.join() for thread in self._sender_threads: thread.join() # Each receiver should receive all the messages sent by # the sender(s). # So, Iterate through each of the receiver items in # self._messages and compare with the expected messages # messages. 
self.assertEqual(len(self._expected), self.no_senders) self.assertEqual(len(self._messages), self.no_receivers) for key, messages in self._messages.iteritems(): self.assertEqual(self._expected, messages) TestQpidTopicAndFanout.generate_scenarios() class TestQpidReconnectOrder(test_utils.BaseTestCase): """Unit Test cases to test reconnection """ def test_reconnect_order(self): brokers = ['host1', 'host2', 'host3', 'host4', 'host5'] brokers_count = len(brokers) self.messaging_conf.conf.qpid_hosts = brokers with mock.patch('qpid.messaging.Connection') as conn_mock: # starting from the first broker in the list connection = qpid_driver.Connection(self.messaging_conf.conf) # reconnect will advance to the next broker, one broker per # attempt, and then wrap to the start of the list once the end is # reached for _ in range(brokers_count): connection.reconnect() connection.close() expected = [] for broker in brokers: expected.extend([mock.call(broker), mock.call().open(), mock.call().session(), mock.call().opened(), mock.call().opened().__nonzero__(), mock.call().close()]) # the last one was closed with close(), not reconnect() expected.extend([mock.call(brokers[0]), mock.call().open(), mock.call().session(), mock.call().close()]) conn_mock.assert_has_calls(expected) def synchronized(func): func.__lock__ = threading.Lock() def synced_func(*args, **kws): with func.__lock__: return func(*args, **kws) return synced_func class FakeQpidMsgManager(object): def __init__(self): self._exchanges = {} @synchronized def add_exchange(self, exchange): if exchange not in self._exchanges: self._exchanges[exchange] = {'msgs': [], 'consumers': {}} @synchronized def add_exchange_consumer(self, exchange, consumer_id): exchange_info = self._exchanges[exchange] cons_dict = exchange_info['consumers'] cons_dict[consumer_id] = 0 @synchronized def add_exchange_msg(self, exchange, msg): exchange_info = self._exchanges[exchange] exchange_info['msgs'].append(msg) def get_exchange_msg(self, exchange, index): exchange_info = self._exchanges[exchange] return exchange_info['msgs'][index] def get_no_exch_msgs(self, exchange): exchange_info = self._exchanges[exchange] return len(exchange_info['msgs']) def get_exch_cons_index(self, exchange, consumer_id): exchange_info = self._exchanges[exchange] cons_dict = exchange_info['consumers'] return cons_dict[consumer_id] @synchronized def inc_consumer_index(self, exchange, consumer_id): exchange_info = self._exchanges[exchange] cons_dict = exchange_info['consumers'] cons_dict[consumer_id] += 1 _fake_qpid_msg_manager = FakeQpidMsgManager() class FakeQpidSessionSender(object): def __init__(self, session, id, target, options): self.session = session self.id = id self.target = target self.options = options @synchronized def send(self, object, sync=True, timeout=None): _fake_qpid_msg_manager.add_exchange_msg(self.target, object) def close(self, timeout=None): pass class FakeQpidSessionReceiver(object): def __init__(self, session, id, source, options): self.session = session self.id = id self.source = source self.options = options @synchronized def fetch(self, timeout=None): if timeout is None: # if timeout is not given, take a default time out # of 30 seconds to avoid indefinite loop _timeout = 30 else: _timeout = timeout deadline = time.time() + _timeout while time.time() <= deadline: index = _fake_qpid_msg_manager.get_exch_cons_index(self.source, self.id) try: msg = _fake_qpid_msg_manager.get_exchange_msg(self.source, index) except IndexError: pass else: 
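# A message exists at this consumer's cursor: advance the cursor and
# return the payload wrapped as a qpid Message.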

class FakeQpidMsgManager(object):
    def __init__(self):
        self._exchanges = {}

    @synchronized
    def add_exchange(self, exchange):
        if exchange not in self._exchanges:
            self._exchanges[exchange] = {'msgs': [], 'consumers': {}}

    @synchronized
    def add_exchange_consumer(self, exchange, consumer_id):
        exchange_info = self._exchanges[exchange]
        cons_dict = exchange_info['consumers']
        cons_dict[consumer_id] = 0

    @synchronized
    def add_exchange_msg(self, exchange, msg):
        exchange_info = self._exchanges[exchange]
        exchange_info['msgs'].append(msg)

    def get_exchange_msg(self, exchange, index):
        exchange_info = self._exchanges[exchange]
        return exchange_info['msgs'][index]

    def get_no_exch_msgs(self, exchange):
        exchange_info = self._exchanges[exchange]
        return len(exchange_info['msgs'])

    def get_exch_cons_index(self, exchange, consumer_id):
        exchange_info = self._exchanges[exchange]
        cons_dict = exchange_info['consumers']
        return cons_dict[consumer_id]

    @synchronized
    def inc_consumer_index(self, exchange, consumer_id):
        exchange_info = self._exchanges[exchange]
        cons_dict = exchange_info['consumers']
        cons_dict[consumer_id] += 1

_fake_qpid_msg_manager = FakeQpidMsgManager()


class FakeQpidSessionSender(object):
    def __init__(self, session, id, target, options):
        self.session = session
        self.id = id
        self.target = target
        self.options = options

    @synchronized
    def send(self, object, sync=True, timeout=None):
        _fake_qpid_msg_manager.add_exchange_msg(self.target, object)

    def close(self, timeout=None):
        pass


class FakeQpidSessionReceiver(object):
    def __init__(self, session, id, source, options):
        self.session = session
        self.id = id
        self.source = source
        self.options = options

    @synchronized
    def fetch(self, timeout=None):
        if timeout is None:
            # if no timeout is given, use a default of 30 seconds
            # to avoid looping indefinitely
            _timeout = 30
        else:
            _timeout = timeout

        deadline = time.time() + _timeout
        while time.time() <= deadline:
            index = _fake_qpid_msg_manager.get_exch_cons_index(self.source,
                                                               self.id)
            try:
                msg = _fake_qpid_msg_manager.get_exchange_msg(self.source,
                                                              index)
            except IndexError:
                pass
            else:
                _fake_qpid_msg_manager.inc_consumer_index(self.source,
                                                          self.id)
                return qpid.messaging.Message(msg)
            time.sleep(0.050)

        if timeout is None:
            raise Exception('timed out waiting for reply')

    def close(self, timeout=None):
        pass

    @synchronized
    def available(self):
        no_msgs = _fake_qpid_msg_manager.get_no_exch_msgs(self.source)
        index = _fake_qpid_msg_manager.get_exch_cons_index(self.source,
                                                           self.id)
        if no_msgs == 0 or index >= no_msgs:
            return 0
        else:
            return no_msgs - index


class FakeQpidSession(object):
    def __init__(self, connection=None, name=None, transactional=None):
        self.connection = connection
        self.name = name
        self.transactional = transactional
        self._receivers = {}
        self.conf = None
        self.url = None
        self._senders = {}
        self._sender_id = 0
        self._receiver_id = 0

    @synchronized
    def sender(self, target, **options):
        exchange_key = self._extract_exchange_key(target)
        _fake_qpid_msg_manager.add_exchange(exchange_key)

        sendobj = FakeQpidSessionSender(self, self._sender_id,
                                        exchange_key, options)
        self._senders[self._sender_id] = sendobj
        self._sender_id += 1
        return sendobj

    @synchronized
    def receiver(self, source, **options):
        exchange_key = self._extract_exchange_key(source)
        _fake_qpid_msg_manager.add_exchange(exchange_key)

        recvobj = FakeQpidSessionReceiver(self, self._receiver_id,
                                          exchange_key, options)
        self._receivers[self._receiver_id] = recvobj
        _fake_qpid_msg_manager.add_exchange_consumer(exchange_key,
                                                     self._receiver_id)
        self._receiver_id += 1
        return recvobj

    def acknowledge(self, message=None, disposition=None, sync=True):
        pass

    @synchronized
    def flush_exchanges(self):
        _fake_qpid_msg_manager._exchanges = {}

    def _extract_exchange_key(self, exchange_msg):
        """Extract a unique key for the exchange.

        This key is used as the dictionary key for this exchange.
        E.g. if the exchange_msg (for qpid topology version 1) is
        33/33 ; {"node": {"x-declare": {"auto-delete": true, ....
        then 33 is returned as the key.
        E.g. 2. for topology v2, if the exchange_msg is
        amq.direct/44 ; {"link": {"x-dec.......
        then 44 is returned.
        """
        # first split on ';'
        semicolon_split = exchange_msg.split(';')
        # then split the first item of semicolon_split on '/'
        slash_split = semicolon_split[0].split('/')
        # the last element of that list is the key
        key = slash_split[-1]
        return key.strip()

_fake_session = FakeQpidSession()


def get_fake_qpid_session():
    return _fake_session
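
# A typical round trip through the fake session, for reference (the address
# string here is a made-up topology-v1 example):
#
#     session = get_fake_qpid_session()
#     sender = session.sender('33/33 ; {"node": {}}')
#     receiver = session.receiver('33/33 ; {"node": {}}')
#     sender.send({'content': '0-0'})
#     msg = receiver.fetch(timeout=1)  # qpid.messaging.Message wrapping dict
#
# Both ends resolve to exchange key '33', so the receiver's per-consumer
# index walks the same message list the sender appended to.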
oslo.messaging-1.3.0/tests/test_rpc_dispatcher.py0000664000175300017540000001516412316527457023370 0ustar jenkinsjenkins00000000000000
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import testscenarios

from oslo import messaging
from oslo.messaging import serializer as msg_serializer
from tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


class _FakeEndpoint(object):

    def __init__(self, target=None):
        self.target = target

    def foo(self, ctxt, **kwargs):
        pass

    def bar(self, ctxt, **kwargs):
        pass


class TestDispatcher(test_utils.BaseTestCase):

    scenarios = [
        ('no_endpoints',
         dict(endpoints=[],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo'),
              success=False, ex=messaging.UnsupportedVersion)),
        ('default_target',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo'),
              success=True, ex=None)),
        ('default_target_ctxt_and_args',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='bar'),
              ctxt=dict(user='bob'),
              msg=dict(method='bar', args=dict(blaa=True)),
              success=True, ex=None)),
        ('default_target_namespace',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace=None),
              success=True, ex=None)),
        ('default_target_version',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', version='1.0'),
              success=True, ex=None)),
        ('default_target_no_such_method',
         dict(endpoints=[{}],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foobar'),
              success=False, ex=messaging.NoSuchMethod)),
        ('namespace',
         dict(endpoints=[{}, dict(namespace='testns')],
              dispatch_to=dict(endpoint=1, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace='testns'),
              success=True, ex=None)),
        ('namespace_mismatch',
         dict(endpoints=[{}, dict(namespace='testns')],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', namespace='nstest'),
              success=False, ex=messaging.UnsupportedVersion)),
        ('version',
         dict(endpoints=[dict(version='1.5'), dict(version='3.4')],
              dispatch_to=dict(endpoint=1, method='foo'),
              ctxt={}, msg=dict(method='foo', version='3.2'),
              success=True, ex=None)),
        ('version_mismatch',
         dict(endpoints=[dict(version='1.5'), dict(version='3.0')],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', version='3.2'),
              success=False, ex=messaging.UnsupportedVersion)),
    ]

    def test_dispatcher(self):
        endpoints = [mock.Mock(spec=_FakeEndpoint,
                               target=messaging.Target(**e))
                     for e in self.endpoints]

        serializer = None
        target = messaging.Target()
        dispatcher = messaging.RPCDispatcher(target, endpoints, serializer)

        def check_reply(reply=None, failure=None, log_failure=True):
            if self.ex and failure is not None:
                ex = failure[1]
                self.assertFalse(self.success, ex)
                self.assertIsNotNone(self.ex, ex)
                self.assertIsInstance(ex, self.ex, ex)
                if isinstance(ex, messaging.NoSuchMethod):
                    self.assertEqual(ex.method, self.msg.get('method'))
                elif isinstance(ex, messaging.UnsupportedVersion):
                    self.assertEqual(ex.version,
                                     self.msg.get('version', '1.0'))
            else:
                self.assertTrue(self.success, failure)
                self.assertIsNone(failure)

        incoming = mock.Mock(ctxt=self.ctxt, message=self.msg)
        incoming.reply.side_effect = check_reply

        with dispatcher(incoming) as callback:
            callback()

        for n, endpoint in enumerate(endpoints):
            for method_name in ['foo', 'bar']:
                method = getattr(endpoint, method_name)
                if self.dispatch_to and n == self.dispatch_to['endpoint'] and \
                        method_name == self.dispatch_to['method']:
                    method.assert_called_once_with(
                        self.ctxt, **self.msg.get('args', {}))
                else:
                    self.assertEqual(method.call_count, 0)

        self.assertEqual(incoming.reply.call_count, 1)
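
# How a message selects an endpoint in the scenarios above, in short: the
# dispatcher matches the message's optional 'namespace' and 'version'
# against each endpoint's Target, then looks the 'method' up on that
# endpoint. For example, in the 'version' scenario a msg with version='3.2'
# dispatches to the endpoint whose Target has version='3.4' (same major,
# equal-or-newer minor), while the 'version_mismatch' endpoints ('1.5' and
# '3.0') cannot satisfy it and UnsupportedVersion is raised.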

class TestSerializer(test_utils.BaseTestCase):

    scenarios = [
        ('no_args_or_retval',
         dict(ctxt={}, dctxt={}, args={}, retval=None)),
        ('args_and_retval',
         dict(ctxt=dict(user='bob'),
              dctxt=dict(user='alice'),
              args=dict(a='a', b='b', c='c'),
              retval='d')),
    ]

    def test_serializer(self):
        endpoint = _FakeEndpoint()
        serializer = msg_serializer.NoOpSerializer()
        target = messaging.Target()
        dispatcher = messaging.RPCDispatcher(target, [endpoint], serializer)

        self.mox.StubOutWithMock(endpoint, 'foo')
        args = dict([(k, 'd' + v) for k, v in self.args.items()])
        endpoint.foo(self.dctxt, **args).AndReturn(self.retval)

        self.mox.StubOutWithMock(serializer, 'serialize_entity')
        self.mox.StubOutWithMock(serializer, 'deserialize_entity')
        self.mox.StubOutWithMock(serializer, 'deserialize_context')

        serializer.deserialize_context(self.ctxt).AndReturn(self.dctxt)

        for arg in self.args:
            serializer.deserialize_entity(self.dctxt, arg).AndReturn('d' + arg)

        serializer.serialize_entity(self.dctxt, self.retval).\
            AndReturn('s' + self.retval if self.retval else None)

        self.mox.ReplayAll()

        retval = dispatcher._dispatch(self.ctxt, dict(method='foo',
                                                      args=self.args))
        if self.retval is not None:
            self.assertEqual(retval, 's' + self.retval)
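
# The server-side serializer flow exercised above, traced for the
# 'args_and_retval' scenario:
#
#     ctxt {'user': 'bob'}  --deserialize_context-->  {'user': 'alice'}
#     arg  'a'              --deserialize_entity--->  'da'
#     retval 'd'            --serialize_entity----->  'sd'
#
# i.e. entities are deserialized before the endpoint method runs, and its
# return value is serialized before being sent back.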
oslo.messaging-1.3.0/tests/__init__.py0000664000175300017540000000000112316527457021056 0ustar jenkinsjenkins00000000000000
oslo.messaging-1.3.0/tests/test_executor.py0000664000175300017540000001064412316527457022230 0ustar jenkinsjenkins00000000000000
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import eventlet
import threading

import mock
import testscenarios

from oslo.messaging._executors import impl_blocking
from oslo.messaging._executors import impl_eventlet
from tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


class TestExecutor(test_utils.BaseTestCase):

    _impl = [('blocking', dict(executor=impl_blocking.BlockingExecutor,
                               stop_before_return=True)),
             ('eventlet', dict(executor=impl_eventlet.EventletExecutor,
                               stop_before_return=False))]

    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._impl)

    @staticmethod
    def _run_in_thread(executor):
        def thread():
            executor.start()
            executor.wait()
        thread = threading.Thread(target=thread)
        thread.daemon = True
        thread.start()
        thread.join(timeout=30)

    def test_executor_dispatch(self):
        callback = mock.MagicMock(return_value='result')

        class Dispatcher(object):
            @contextlib.contextmanager
            def __call__(self, incoming):
                yield lambda: callback(incoming.ctxt, incoming.message)

        listener = mock.Mock(spec=['poll'])
        executor = self.executor(self.conf, listener, Dispatcher())

        incoming_message = mock.MagicMock(ctxt={},
                                          message={'payload': 'data'})

        def fake_poll():
            if self.stop_before_return:
                executor.stop()
                return incoming_message
            else:
                if listener.poll.call_count == 1:
                    return incoming_message
                executor.stop()

        listener.poll.side_effect = fake_poll

        self._run_in_thread(executor)

        callback.assert_called_once_with({}, {'payload': 'data'})

TestExecutor.generate_scenarios()


class ExceptedException(Exception):
    pass


class EventletContextManagerSpawnTest(test_utils.BaseTestCase):
    def setUp(self):
        super(EventletContextManagerSpawnTest, self).setUp()
        self.before = mock.Mock()
        self.callback = mock.Mock()
        self.after = mock.Mock()
        self.exception_call = mock.Mock()

        @contextlib.contextmanager
        def context_mgr():
            self.before()
            try:
                yield lambda: self.callback()
            except ExceptedException:
                self.exception_call()
            self.after()

        self.mgr = context_mgr()

    def test_normal_run(self):
        thread = impl_eventlet.spawn_with(self.mgr, pool=eventlet)
        thread.wait()

        self.assertEqual(self.before.call_count, 1)
        self.assertEqual(self.callback.call_count, 1)
        self.assertEqual(self.after.call_count, 1)
        self.assertEqual(self.exception_call.call_count, 0)

    def test_excepted_exception(self):
        self.callback.side_effect = ExceptedException

        thread = impl_eventlet.spawn_with(self.mgr, pool=eventlet)
        try:
            thread.wait()
        except ExceptedException:
            pass

        self.assertEqual(self.before.call_count, 1)
        self.assertEqual(self.callback.call_count, 1)
        self.assertEqual(self.after.call_count, 1)
        self.assertEqual(self.exception_call.call_count, 1)

    def test_unexcepted_exception(self):
        self.callback.side_effect = Exception

        thread = impl_eventlet.spawn_with(self.mgr, pool=eventlet)
        try:
            thread.wait()
        except Exception:
            pass

        self.assertEqual(self.before.call_count, 1)
        self.assertEqual(self.callback.call_count, 1)
        self.assertEqual(self.after.call_count, 0)
        self.assertEqual(self.exception_call.call_count, 0)
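
# What the three cases above pin down about spawn_with(): the context
# manager's setup (self.before) runs before the yielded callable, and its
# teardown (self.after) runs only when the callable returns cleanly or
# raises an exception the manager itself handles; an unhandled exception
# propagates out of thread.wait() and skips both the except clause and
# after().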
oslo.messaging-1.3.0/tests/test_utils.py0000664000175300017540000000374212316527457021525 0ustar jenkinsjenkins00000000000000
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.messaging import _utils as utils
from tests import utils as test_utils


class VersionIsCompatibleTestCase(test_utils.BaseTestCase):
    def test_version_is_compatible_same(self):
        self.assertTrue(utils.version_is_compatible('1.23', '1.23'))

    def test_version_is_compatible_newer_minor(self):
        self.assertTrue(utils.version_is_compatible('1.24', '1.23'))

    def test_version_is_compatible_older_minor(self):
        self.assertFalse(utils.version_is_compatible('1.22', '1.23'))

    def test_version_is_compatible_major_difference1(self):
        self.assertFalse(utils.version_is_compatible('2.23', '1.23'))

    def test_version_is_compatible_major_difference2(self):
        self.assertFalse(utils.version_is_compatible('1.23', '2.23'))

    def test_version_is_compatible_newer_rev(self):
        self.assertFalse(utils.version_is_compatible('1.23', '1.23.1'))

    def test_version_is_compatible_newer_rev_both(self):
        self.assertFalse(utils.version_is_compatible('1.23.1', '1.23.2'))

    def test_version_is_compatible_older_rev_both(self):
        self.assertTrue(utils.version_is_compatible('1.23.2', '1.23.1'))

    def test_version_is_compatible_older_rev(self):
        self.assertTrue(utils.version_is_compatible('1.24', '1.23.1'))

    def test_version_is_compatible_no_rev_is_zero(self):
        self.assertTrue(utils.version_is_compatible('1.23.0', '1.23'))
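
# The compatibility rule these cases encode, sketched as code for reference
# (the real implementation lives in oslo.messaging._utils and may differ in
# detail; nothing imports this helper):
def _version_rule_sketch(imp_version, requested):
    imp = [int(p) for p in imp_version.split('.')] + [0]
    req = [int(p) for p in requested.split('.')] + [0]
    # same major version, and the implemented (minor, rev) must be equal
    # to or newer than the requested (minor, rev); a missing rev counts
    # as zero
    return imp[0] == req[0] and (imp[1], imp[2]) >= (req[1], req[2])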
""" def __init__(self, cond, n_threads, stubs): self.cond = cond self.stubs = stubs self.n_threads = n_threads self.n_waits = 0 self.orig_wait = cond.wait def count_waits(**kwargs): self.n_waits += 1 self.orig_wait(**kwargs) self.stubs.Set(self.cond, 'wait', count_waits) def wait(self): while self.n_waits < self.n_threads: pass self.stubs.Set(self.cond, 'wait', self.orig_wait) def test_pool(self): kwargs = {} if self.max_size is not None: kwargs['max_size'] = self.max_size p = self.TestPool(**kwargs) if self.create_error: def create_error(): raise RuntimeError orig_create = p.create self.stubs.Set(p, 'create', create_error) self.assertRaises(RuntimeError, p.get) self.stubs.Set(p, 'create', orig_create) objs = [] for i in range(self.n_iters): objs.append(p.get()) self.assertIsInstance(objs[i], uuid.UUID) def wait_for_obj(): o = p.get() self.assertIn(o, objs) waiter = self.ThreadWaitWaiter(p._cond, self.n_iters, self.stubs) threads = [] for i in range(self.n_iters): t = threading.Thread(target=wait_for_obj) t.start() threads.append(t) waiter.wait() for o in objs: p.put(o) for t in threads: t.join() for o in objs: p.put(o) for o in p.iter_free(): self.assertIn(o, objs) objs.remove(o) self.assertEqual(objs, []) PoolTestCase.generate_scenarios() oslo.messaging-1.3.0/tests/test_opts.py0000664000175300017540000000301312316527457021351 0ustar jenkinsjenkins00000000000000 # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pkg_resources from oslo.messaging import opts from tests import utils as test_utils class OptsTestCase(test_utils.BaseTestCase): def _test_list_opts(self, result): self.assertEqual(2, len(result)) groups = [g for (g, l) in result] self.assertIn(None, groups) self.assertIn('matchmaker_ring', groups) opt_names = [o.name for (g, l) in result for o in l] self.assertIn('rpc_backend', opt_names) self.assertIn('allowed_rpc_exception_modules', opt_names) def test_list_opts(self): self._test_list_opts(opts.list_opts()) def test_entry_point(self): result = None for ep in pkg_resources.iter_entry_points('oslo.config.opts'): if ep.name == "oslo.messaging": list_fn = ep.load() result = list_fn() break self.assertIsNotNone(result) self._test_list_opts(result) oslo.messaging-1.3.0/tests/test_rpc_server.py0000664000175300017540000004157312316527457022553 0ustar jenkinsjenkins00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
oslo.messaging-1.3.0/tests/test_opts.py0000664000175300017540000000301312316527457021351 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pkg_resources

from oslo.messaging import opts
from tests import utils as test_utils


class OptsTestCase(test_utils.BaseTestCase):

    def _test_list_opts(self, result):
        self.assertEqual(2, len(result))

        groups = [g for (g, l) in result]
        self.assertIn(None, groups)
        self.assertIn('matchmaker_ring', groups)

        opt_names = [o.name for (g, l) in result for o in l]
        self.assertIn('rpc_backend', opt_names)
        self.assertIn('allowed_rpc_exception_modules', opt_names)

    def test_list_opts(self):
        self._test_list_opts(opts.list_opts())

    def test_entry_point(self):
        result = None
        for ep in pkg_resources.iter_entry_points('oslo.config.opts'):
            if ep.name == "oslo.messaging":
                list_fn = ep.load()
                result = list_fn()
                break

        self.assertIsNotNone(result)
        self._test_list_opts(result)
oslo.messaging-1.3.0/tests/test_rpc_server.py0000664000175300017540000004157312316527457022543 0ustar jenkinsjenkins00000000000000
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import threading

from oslo.config import cfg
import testscenarios

from oslo import messaging
from tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


class ServerSetupMixin(object):

    class Server(object):
        def __init__(self, transport, topic, server, endpoint, serializer):
            target = messaging.Target(topic=topic, server=server)
            self._server = messaging.get_rpc_server(transport,
                                                    target,
                                                    [endpoint, self],
                                                    serializer=serializer)

        def stop(self, ctxt):
            # Check start() does nothing with a running server
            self._server.start()
            self._server.stop()
            self._server.wait()

        def start(self):
            self._server.start()

    class TestSerializer(object):

        def serialize_entity(self, ctxt, entity):
            return ('s' + entity) if entity else entity

        def deserialize_entity(self, ctxt, entity):
            return ('d' + entity) if entity else entity

        def serialize_context(self, ctxt):
            return dict([(k, 's' + v) for k, v in ctxt.items()])

        def deserialize_context(self, ctxt):
            return dict([(k, 'd' + v) for k, v in ctxt.items()])

    def __init__(self):
        self.serializer = self.TestSerializer()

    def _setup_server(self, transport, endpoint, topic=None, server=None):
        server = self.Server(transport,
                             topic=topic or 'testtopic',
                             server=server or 'testserver',
                             endpoint=endpoint,
                             serializer=self.serializer)

        thread = threading.Thread(target=server.start)
        thread.daemon = True
        thread.start()

        return thread

    def _stop_server(self, client, server_thread, topic=None):
        if topic is not None:
            client = client.prepare(topic=topic)
        client.cast({}, 'stop')
        server_thread.join(timeout=30)

    def _setup_client(self, transport, topic='testtopic'):
        return messaging.RPCClient(transport,
                                   messaging.Target(topic=topic),
                                   serializer=self.serializer)
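
# With this serializer on both the client and the server, a call argument
# makes two hops each way, which is why the tests below expect doubled
# prefixes:
#
#     client serialize_entity:   'foo'    -> 'sfoo'
#     server deserialize_entity: 'sfoo'   -> 'dsfoo'   (what the endpoint sees)
#     server serialize_entity:   'dsfoo'  -> 'sdsfoo'
#     client deserialize_entity: 'sdsfoo' -> 'dsdsfoo' (what call() returns)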

class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin):

    def __init__(self, *args):
        super(TestRPCServer, self).__init__(*args)
        ServerSetupMixin.__init__(self)

    def setUp(self):
        super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts())

    def test_constructor(self):
        transport = messaging.get_transport(self.conf, url='fake:')
        target = messaging.Target(topic='foo', server='bar')
        endpoints = [object()]
        serializer = object()

        server = messaging.get_rpc_server(transport, target, endpoints,
                                          serializer=serializer)

        self.assertIs(server.conf, self.conf)
        self.assertIs(server.transport, transport)
        self.assertIsInstance(server.dispatcher, messaging.RPCDispatcher)
        self.assertIs(server.dispatcher.endpoints, endpoints)
        self.assertIs(server.dispatcher.serializer, serializer)
        self.assertIs(server.executor, 'blocking')

    def test_no_target_server(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        server = messaging.get_rpc_server(
            transport,
            messaging.Target(topic='testtopic'),
            [])
        try:
            server.start()
        except Exception as ex:
            self.assertIsInstance(ex, messaging.InvalidTarget, ex)
            self.assertEqual(ex.target.topic, 'testtopic')
        else:
            self.assertTrue(False)

    def test_no_server_topic(self):
        transport = messaging.get_transport(self.conf, url='fake:')
        target = messaging.Target(server='testserver')
        server = messaging.get_rpc_server(transport, target, [])
        try:
            server.start()
        except Exception as ex:
            self.assertIsInstance(ex, messaging.InvalidTarget, ex)
            self.assertEqual(ex.target.server, 'testserver')
        else:
            self.assertTrue(False)

    def _test_no_client_topic(self, call=True):
        transport = messaging.get_transport(self.conf, url='fake:')

        client = self._setup_client(transport, topic=None)

        method = client.call if call else client.cast

        try:
            method({}, 'ping', arg='foo')
        except Exception as ex:
            self.assertIsInstance(ex, messaging.InvalidTarget, ex)
            self.assertIsNotNone(ex.target)
        else:
            self.assertTrue(False)

    def test_no_client_topic_call(self):
        self._test_no_client_topic(call=True)

    def test_no_client_topic_cast(self):
        self._test_no_client_topic(call=False)

    def test_client_call_timeout(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        finished = False
        wait = threading.Condition()

        class TestEndpoint(object):
            def ping(self, ctxt, arg):
                with wait:
                    if not finished:
                        wait.wait()

        server_thread = self._setup_server(transport, TestEndpoint())
        client = self._setup_client(transport)

        try:
            client.prepare(timeout=0).call({}, 'ping', arg='foo')
        except Exception as ex:
            self.assertIsInstance(ex, messaging.MessagingTimeout, ex)
        else:
            self.assertTrue(False)

        with wait:
            finished = True
            wait.notify()

        self._stop_server(client, server_thread)

    def test_unknown_executor(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        try:
            messaging.get_rpc_server(transport, None, [], executor='foo')
        except Exception as ex:
            self.assertIsInstance(ex, messaging.ExecutorLoadFailure)
            self.assertEqual(ex.executor, 'foo')
        else:
            self.assertTrue(False)

    def test_cast(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        class TestEndpoint(object):
            def __init__(self):
                self.pings = []

            def ping(self, ctxt, arg):
                self.pings.append(arg)

        endpoint = TestEndpoint()
        server_thread = self._setup_server(transport, endpoint)
        client = self._setup_client(transport)

        client.cast({}, 'ping', arg='foo')
        client.cast({}, 'ping', arg='bar')

        self._stop_server(client, server_thread)

        self.assertEqual(endpoint.pings, ['dsfoo', 'dsbar'])

    def test_call(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        class TestEndpoint(object):
            def ping(self, ctxt, arg):
                return arg

        server_thread = self._setup_server(transport, TestEndpoint())
        client = self._setup_client(transport)

        self.assertIsNone(client.call({}, 'ping', arg=None))
        self.assertEqual(client.call({}, 'ping', arg=0), 0)
        self.assertEqual(client.call({}, 'ping', arg=False), False)
        self.assertEqual(client.call({}, 'ping', arg=[]), [])
        self.assertEqual(client.call({}, 'ping', arg={}), {})
        self.assertEqual(client.call({}, 'ping', arg='foo'), 'dsdsfoo')

        self._stop_server(client, server_thread)

    def test_direct_call(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        class TestEndpoint(object):
            def ping(self, ctxt, arg):
                return arg

        server_thread = self._setup_server(transport, TestEndpoint())
        client = self._setup_client(transport)

        direct = client.prepare(server='testserver')
        self.assertIsNone(direct.call({}, 'ping', arg=None))
        self.assertEqual(direct.call({}, 'ping', arg=0), 0)
        self.assertEqual(direct.call({}, 'ping', arg=False), False)
        self.assertEqual(direct.call({}, 'ping', arg=[]), [])
        self.assertEqual(direct.call({}, 'ping', arg={}), {})
        self.assertEqual(direct.call({}, 'ping', arg='foo'), 'dsdsfoo')

        self._stop_server(client, server_thread)

    def test_context(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        class TestEndpoint(object):
            def ctxt_check(self, ctxt, key):
                return ctxt[key]

        server_thread = self._setup_server(transport, TestEndpoint())
        client = self._setup_client(transport)

        self.assertEqual(client.call({'dsa': 'b'},
                                     'ctxt_check',
                                     key='a'),
                         'dsdsb')

        self._stop_server(client, server_thread)
    def test_failure(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        class TestEndpoint(object):
            def ping(self, ctxt, arg):
                raise ValueError(arg)

        server_thread = self._setup_server(transport, TestEndpoint())
        client = self._setup_client(transport)

        try:
            client.call({}, 'ping', arg='foo')
        except Exception as ex:
            self.assertIsInstance(ex, ValueError)
            self.assertEqual(ex[0], 'dsfoo')
        else:
            self.assertTrue(False)

        self._stop_server(client, server_thread)

    def test_expected_failure(self):
        transport = messaging.get_transport(self.conf, url='fake:')

        class TestEndpoint(object):
            @messaging.expected_exceptions(ValueError)
            def ping(self, ctxt, arg):
                raise ValueError(arg)

        server_thread = self._setup_server(transport, TestEndpoint())
        client = self._setup_client(transport)

        try:
            client.call({}, 'ping', arg='foo')
        except Exception as ex:
            self.assertIsInstance(ex, ValueError)
            self.assertEqual(ex[0], 'dsfoo')
        else:
            self.assertTrue(False)

        self._stop_server(client, server_thread)


class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin):

    _exchanges = [
        ('same_exchange', dict(exchange1=None, exchange2=None)),
        ('diff_exchange', dict(exchange1='x1', exchange2='x2')),
    ]

    _topics = [
        ('same_topic', dict(topic1='t', topic2='t')),
        ('diff_topic', dict(topic1='t1', topic2='t2')),
    ]

    _server = [
        ('same_server', dict(server1=None, server2=None)),
        ('diff_server', dict(server1='s1', server2='s2')),
    ]

    _fanout = [
        ('not_fanout', dict(fanout1=None, fanout2=None)),
        ('fanout', dict(fanout1=True, fanout2=True)),
    ]

    _method = [
        ('call', dict(call1=True, call2=True)),
        ('cast', dict(call1=False, call2=False)),
    ]

    _endpoints = [
        ('one_endpoint',
         dict(multi_endpoints=False,
              expect1=['ds1', 'ds2'],
              expect2=['ds1', 'ds2'])),
        ('two_endpoints',
         dict(multi_endpoints=True,
              expect1=['ds1'],
              expect2=['ds2'])),
    ]

    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
                                                         cls._topics,
                                                         cls._server,
                                                         cls._fanout,
                                                         cls._method,
                                                         cls._endpoints)

        # fanout call not supported
        def filter_fanout_call(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            call = params['call1'] or params['call2']
            return not (call and fanout)

        # listening multiple times on same topic/server pair not supported
        def filter_same_topic_and_server(scenario):
            params = scenario[1]
            single_topic = params['topic1'] == params['topic2']
            single_server = params['server1'] == params['server2']
            return not (single_topic and single_server)

        # fanout to multiple servers on same topic and exchange:
        # each endpoint will receive both messages
        def fanout_to_servers(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            multi_servers = params['server1'] != params['server2']
            if fanout and single_exchange and single_topic and multi_servers:
                params['expect1'] = params['expect1'][:] + params['expect1']
                params['expect2'] = params['expect2'][:] + params['expect2']
            return scenario

        # multiple endpoints on same topic and exchange:
        # either endpoint can get either message
        def single_topic_multi_endpoints(scenario):
            params = scenario[1]
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            if single_topic and single_exchange and params['multi_endpoints']:
                params['expect_either'] = (params['expect1'] +
                                           params['expect2'])
                params['expect1'] = params['expect2'] = []
            else:
                params['expect_either'] = []
            return scenario

        for f in [filter_fanout_call, filter_same_topic_and_server]:
            cls.scenarios = filter(f, cls.scenarios)
        for m in [fanout_to_servers, single_topic_multi_endpoints]:
            cls.scenarios = map(m, cls.scenarios)
    def __init__(self, *args):
        super(TestMultipleServers, self).__init__(*args)
        ServerSetupMixin.__init__(self)

    def setUp(self):
        super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts())

    def test_multiple_servers(self):
        url1 = 'fake:///' + (self.exchange1 or '')
        url2 = 'fake:///' + (self.exchange2 or '')

        transport1 = messaging.get_transport(self.conf, url=url1)
        if url1 != url2:
            transport2 = messaging.get_transport(self.conf, url=url2)
        else:
            transport2 = transport1

        class TestEndpoint(object):
            def __init__(self):
                self.pings = []

            def ping(self, ctxt, arg):
                self.pings.append(arg)

            def alive(self, ctxt):
                return 'alive'

        if self.multi_endpoints:
            endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
        else:
            endpoint1 = endpoint2 = TestEndpoint()

        thread1 = self._setup_server(transport1, endpoint1,
                                     topic=self.topic1, server=self.server1)
        thread2 = self._setup_server(transport2, endpoint2,
                                     topic=self.topic2, server=self.server2)

        client1 = self._setup_client(transport1, topic=self.topic1)
        client2 = self._setup_client(transport2, topic=self.topic2)

        client1 = client1.prepare(server=self.server1)
        client2 = client2.prepare(server=self.server2)

        if self.fanout1:
            client1.call({}, 'alive')
            client1 = client1.prepare(fanout=True)
        if self.fanout2:
            client2.call({}, 'alive')
            client2 = client2.prepare(fanout=True)

        (client1.call if self.call1 else client1.cast)({}, 'ping', arg='1')
        (client2.call if self.call2 else client2.cast)({}, 'ping', arg='2')

        self.assertTrue(thread1.isAlive())
        self._stop_server(client1.prepare(fanout=None),
                          thread1, topic=self.topic1)
        self.assertTrue(thread2.isAlive())
        self._stop_server(client2.prepare(fanout=None),
                          thread2, topic=self.topic2)

        def check(pings, expect):
            self.assertEqual(len(pings), len(expect))
            for a in expect:
                self.assertIn(a, pings)

        if self.expect_either:
            check(endpoint1.pings + endpoint2.pings, self.expect_either)
        else:
            check(endpoint1.pings, self.expect1)
            check(endpoint2.pings, self.expect2)

TestMultipleServers.generate_scenarios()
oslo.messaging-1.3.0/tests/test_log_handler.py0000664000175300017540000000545712316527457022650 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

import mock

from oslo import messaging
from oslo.messaging.notify import log_handler
from tests import test_notifier
from tests import utils as test_utils
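
# PublishErrorsHandler (exercised below) is a standard logging handler that
# turns error-level log records into notifications. A minimal wiring
# sketch, assuming the usual stdlib logging setup:
#
#     logging.getLogger().addHandler(
#         log_handler.PublishErrorsHandler(logging.ERROR))
#
# Once attached, an ERROR-level record emits an 'error_notification' via a
# Notifier with publisher_id 'error.publisher', as the second test asserts.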

class PublishErrorsHandlerTestCase(test_utils.BaseTestCase):
    """Tests for log.PublishErrorsHandler"""

    def setUp(self):
        super(PublishErrorsHandlerTestCase, self).setUp()
        self.publisherrorshandler = (log_handler.
                                     PublishErrorsHandler(logging.ERROR))

    def test_emit_cfg_log_notifier_in_notifier_drivers(self):
        drivers = ['messaging', 'log']
        self.config(notification_driver=drivers)
        self.stub_flg = True

        transport = test_notifier._FakeTransport(self.conf)
        notifier = messaging.Notifier(transport)

        def fake_notifier(*args, **kwargs):
            self.stub_flg = False

        self.stubs.Set(notifier, 'error', fake_notifier)

        logrecord = logging.LogRecord(name='name', level='WARN',
                                      pathname='/tmp', lineno=1,
                                      msg='Message', args=None,
                                      exc_info=None)
        self.publisherrorshandler.emit(logrecord)
        self.assertTrue(self.stub_flg)

    @mock.patch.object(messaging.notify.notifier.Notifier, '_notify')
    def test_emit_notification(self, mock_notify):
        logrecord = logging.LogRecord(name='name', level='ERROR',
                                      pathname='/tmp', lineno=1,
                                      msg='Message', args=None,
                                      exc_info=None)

        mock_init = mock.Mock(return_value=None)
        with mock.patch.object(messaging.notify.notifier.Notifier,
                               '__init__', mock_init):
            # Recreate the handler so the __init__ mock takes effect.
            self.publisherrorshandler = (log_handler.
                                         PublishErrorsHandler(logging.ERROR))
        self.publisherrorshandler.emit(logrecord)
        mock_init.assert_called_with(mock.ANY,
                                     publisher_id='error.publisher')
        mock_notify.assert_called_with(None, 'error_notification',
                                       {'error': 'Message'}, 'ERROR')