ovn-octavia-provider-2.0.0/

--- ovn-octavia-provider-2.0.0/.coveragerc ---

[run]
branch = True
source = ovn_octavia_provider
omit = ovn_octavia_provider/tests/*

[report]
ignore_errors = True

--- ovn-octavia-provider-2.0.0/.pylintrc ---

# The format of this file isn't really documented; just use --generate-rcfile

[MASTER]
# Add to the black list. It should be a base name, not a
# path. You may set this option multiple times.
ignore=.git,tests

[MESSAGES CONTROL]
# TODO: This list is copied from neutron, the options which do not need to be
# suppressed have been already removed, some of the remaining options will be
# removed by code adjustment.
disable=
# "F" Fatal errors that prevent further processing
 import-error,
# "I" Informational noise
# "E" Error for important programming issues (likely bugs)
 no-member,
# "W" Warnings for stylistic problems or minor programming issues
 abstract-method,
 arguments-differ,
 attribute-defined-outside-init,
 broad-except,
 dangerous-default-value,
 fixme,
 global-statement,
 no-init,
 protected-access,
 redefined-builtin,
 redefined-outer-name,
 signature-differs,
 unused-argument,
 unused-import,
 unused-variable,
 useless-super-delegation,
# "C" Coding convention violations
 bad-continuation,
 invalid-name,
 len-as-condition,
 misplaced-comparison-constant,
 missing-docstring,
 superfluous-parens,
 ungrouped-imports,
 wrong-import-order,
# "R" Refactor recommendations
 duplicate-code,
 no-else-return,
 no-self-use,
 too-few-public-methods,
 too-many-ancestors,
 too-many-arguments,
 too-many-branches,
 too-many-instance-attributes,
 too-many-lines,
 too-many-locals,
 too-many-public-methods,
 too-many-return-statements,
 too-many-statements,
 inconsistent-return-statements,
 useless-object-inheritance,
 too-many-nested-blocks,
 too-many-boolean-expressions,
 not-callable,
# new for python3 version of pylint
 chained-comparison,
 consider-using-dict-comprehension,
 consider-using-in,
 consider-using-set-comprehension,
 unnecessary-pass,
 useless-object-inheritance,
 arguments-renamed

[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=

[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=

[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use oslo_serialization.jsonutils
 json

[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems

[REPORTS]
# Tells whether to display a full report or only the messages
reports=no

--- ovn-octavia-provider-2.0.0/.stestr.conf ---

[DEFAULT]
test_path=${OS_TEST_PATH:-./ovn_octavia_provider/tests/unit}
top_dir=./

--- ovn-octavia-provider-2.0.0/AUTHORS ---

Brian Haley
Brian Haley
Corey Bryant
Fernando Royo
Flavio Fernandes
Flavio Fernandes
Ghanshyam Mann
Gregory Thiemonge
Hervé Beraud
Luis Tomas Bolivar
Maciej JJózefczyk
Maciej Jozefczyk
Maciej Józefczyk
Manpreet Kaur
Michał Nasiadka
OpenStack Release Bot
Rodolfo Alonso Hernandez
Sean McGinnis
Slawek Kaplonski
Terry Wilson

--- ovn-octavia-provider-2.0.0/CONTRIBUTING.rst ---

If you would like to contribute to the development of OpenStack, you must
follow the steps in this page:

https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow documented
at:

https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:

https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider

--- ovn-octavia-provider-2.0.0/ChangeLog ---

CHANGES
=======

2.0.0
-----

* Allow to create ovn loadbalancer on dual-stack provider networks
* Add support for fullypopulated load balancers
* [FT] Enable OVS and OVN compilation from source
* Set listeners back to ACTIVE upon pool/member action failures
* Check gateway IP while looking for LR plugged to LS
* Fix functional tests job
* Support creating members without a subnet ID
* Add Python3 yoga unit tests
* Update master for stable/xena
* Fix lower-constanints and pep8 jobs

1.1.1
-----

* Update docs based on support added recently

1.1.0
-----

* Add Health Monitor support
* Fix race condition retrieving logical router rows
* docs: Update Freenode to OFTC
* Disable some high failure rate tempest tests
* Fix new pylint issues
* Add a Kuryr Kubernetes co-gating job
* Fix functional jobs due to OVS file removal
* Ensure that load balancer is added to logical switch
* Change minversion of tox to 3.18.0
* Add log for request\_handler events
* setup.cfg: Replace dashes with underscores
* Fix python 3.9 unit test failures
* Add Python3 xena unit tests
* Update master for stable/wallaby
* Improve enabled\_provider\_drivers default in devstack

1.0.0
-----

* Start running the tempest API tests
* Switch to new rolevar for run-temepst role
* Add SCTP support
* Remove devstack-gate reference
* Update sample local.conf to mention OVN\_BRANCH
* Check if no members in pool better in batch update
* Don't try to store protocol=None in OVSDB
* Update to pylint 2.6.0+
* Collect OVN logs
* Enable the IPv6 tempest tests
* Correctly set member operating status
* Change to build OVN from source
* Delay string interpolations at logging calls
* Remove work around change for test\_port\_forwarding
* Fix gate failure
* Return UnsupportedOptionError() on loadbalancer failover
* Use get\_ovn\_ovsdb\_retry\_max\_interval()
* Retry status updates to Octavia
* Fix leaked file descriptors by cleaning up objects
* Change devstack script to correctly configure driver
* Include python36-devel in bindep
* Add master and release tempest jobs
* Fix and enable test\_port\_forwarding
* Switch from unittest2 compat methods to Python 3.x methods
* Add Python3 wallaby unit tests
* Fix the check for allowed\_cidrs in listeners
* Update master for stable/victoria
* Mark test\_port\_forwarding unstable to fix gate

0.3.0
-----

* test\_integration.py: remove neutron dependency
* Add integration tests with port forwarding
* Set OPERATING\_STATUS to ONLINE when pool created with listener
* Switch to TOX\_CONSTRAINTS\_FILE
* Fix broken gate
* Omit tests directory while couting the coverage report
* Set the CONF.load\_balancer.enabled\_provider\_drivers tempest conf
* Fix member\_batch\_update function to follow api v2
* Add installation of octavia-tempest-plugin to sample local.conf
* Update ROUND\_ROBIN to SOURCE\_IP\_PORT in docs
* Fix pep8 and functional jobs

0.2.0
-----

* drop mock from lower-constraints
* Add a hacking check for importing mock library
* Remove unnecessary libraries from lower-constraints
* Remove python modules related to coding style checks
* Switch to newer openstackdocstheme and reno versions
* Do not send status update in case of IpAddressAlreadyAllocated
* Fix the Backend class for ovsbapp index changes
* Add support for OVN LB selection fields
* Fix hacking min version to 3.0.1
* Re-home functional tests
* Re-home ovsdb event classes
* Re-home unit tests
* Re-home OvnProviderHelper class
* Fix E741 pep8 errors
* Re-home get\_neutron\_client() into common/clients.py
* Re-home constants to common/constants.py
* Re-home OvnNbIdlForLb class
* Use more octavia-lib constants
* Re-home MockedLB to tests.unit.fakes
* Re-home exceptions to common/exceptions.py
* Add release note README file
* Bump default tox env from py37 to py38
* Add py38 package metadata
* Spawn long-running processes in the driver agent
* Update requirements and constraints
* Make ovn-octavia-provider-v2-dsvm-scenario voting
* Add Python3 victoria unit tests
* Update master for stable/ussuri
* Update the devstack local.conf sample file
* Improve test coverage
* Remove the dependency on the "mock" package
* Fix incorrect os-testr test requirement
* Update hacking for Python3

0.1.0
-----

* Add missing requirements
* Stop using Octavia network driver
* Respect SSL devstack configuration
* Remove backwards-compatibility check
* Add unit tests for hacking/checks.py
* Add tempest gate, devstack plugin and sample devstack config
* Add configuration page to docs
* Use queue library directly
* Do not try to refresh vips on OVN LB that will be deleted
* Enable cover job
* Add admin and contributor documentation
* Wrap IPv6 address with brackets
* Fix Exception string arguments
* Centralize traffic when LB and member has FIP
* Don't fail if VIP already exist or has been deleted before
* Ensure setup.cfg packages matches root directory
* Don't send malformed status update to Octavia
* Fix py2 vs py3 dict keys comparison in functional test
* Cache OvnProviderHelper object in OvnProviderDriver
* Don't fail in case subnet or Logical Switch not found
* Add support for multiple L4 protocols withing same LB
* Import OVN LB functional tests
* Move OVN Octavia Provider driver code to this repository
* Set basepython in tox.ini to python3
* Initialize repository
* Added .gitreview

--- ovn-octavia-provider-2.0.0/HACKING.rst ---

ovn-octavia-provider Style Commandments
=======================================

Read the OpenStack Style Commandments
https://docs.openstack.org/hacking/latest/

Below you can find a list of checks specific to this repository.

- [N322] Detect common errors with assert_called_once_with
- [N328] Detect wrong usage with assertEqual
- [N330] Use assertEqual(*empty*, observed) instead of
  assertEqual(observed, *empty*)
- [N331] Detect wrong usage with assertTrue(isinstance()).
- [N332] Use assertEqual(expected_http_code, observed_http_code) instead of
  assertEqual(observed_http_code, expected_http_code).
- [N343] Production code must not import from ovn_octavia_provider.tests.*
- [N344] Python 3: Do not use filter(lambda obj: test(obj), data). Replace
  it with [obj for obj in data if test(obj)]. An example is shown below.
- [N347] Test code must not import mock library
- [N348] Detect usage of assertItemsEqual
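As an illustration of the N344 check, a minimal before/after sketch
(``data`` here is a placeholder value, not an identifier from this
repository)::

    data = [1, 2, 3, 4]

    # Flagged by N344: filter() with a lambda.
    evens = list(filter(lambda obj: obj % 2 == 0, data))

    # Preferred replacement, per the check message.
    evens = [obj for obj in data if obj % 2 == 0]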
--- ovn-octavia-provider-2.0.0/LICENSE ---

                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the
copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other
entities that control, are controlled by, or are under common control with
that entity. For the purposes of this definition, "control" means (i) the
power, direct or indirect, to cause the direction or management of such
entity, whether by contract or otherwise, or (ii) ownership of fifty percent
(50%) or more of the outstanding shares, or (iii) beneficial ownership of
such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation source,
and configuration files.

"Object" form shall mean any form resulting from mechanical transformation
or translation of a Source form, including but not limited to compiled
object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form,
made available under the License, as indicated by a copyright notice that is
included in or attached to the work (an example is provided in the Appendix
below).

"Derivative Works" shall mean any work, whether in Source or Object form,
that is based on (or derived from) the Work and for which the editorial
revisions, annotations, elaborations, or other modifications represent, as a
whole, an original work of authorship. For the purposes of this License,
Derivative Works shall not include works that remain separable from, or
merely link (or bind by name) to the interfaces of, the Work and Derivative
Works thereof.

"Contribution" shall mean any work of authorship, including the original
version of the Work and any modifications or additions to that Work or
Derivative Works thereof, that is intentionally submitted to Licensor for
inclusion in the Work by the copyright owner or by an individual or Legal
Entity authorized to submit on behalf of the copyright owner. For the
purposes of this definition, "submitted" means any form of electronic,
verbal, or written communication sent to the Licensor or its
representatives, including but not limited to communication on electronic
mailing lists, source code control systems, and issue tracking systems that
are managed by, or on behalf of, the Licensor for the purpose of discussing
and improving the Work, but excluding communication that is conspicuously
marked or otherwise designated in writing by the copyright owner as "Not a
Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on
behalf of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and
such Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable by such
Contributor that are necessarily infringed by their Contribution(s) alone or
by combination of their Contribution(s) with the Work to which such
Contribution(s) was submitted. If You institute patent litigation against
any entity (including a cross-claim or counterclaim in a lawsuit) alleging
that the Work or a Contribution incorporated within the Work constitutes
direct or contributory patent infringement, then any patent licenses granted
to You under this License for that Work shall terminate as of the date such
litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works
thereof in any medium, with or without modifications, and in Source or
Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a
    copy of this License; and

(b) You must cause any modified files to carry prominent notices stating
    that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You
    distribute, all copyright, patent, trademark, and attribution notices
    from the Source form of the Work, excluding those notices that do not
    pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution,
    then any Derivative Works that You distribute must include a readable
    copy of the attribution notices contained within such NOTICE file,
    excluding those notices that do not pertain to any part of the
    Derivative Works, in at least one of the following places: within a
    NOTICE text file distributed as part of the Derivative Works; within the
    Source form or documentation, if provided along with the Derivative
    Works; or, within a display generated by the Derivative Works, if and
    wherever such third-party notices normally appear. The contents of the
    NOTICE file are for informational purposes only and do not modify the
    License. You may add Your own attribution notices within Derivative
    Works that You distribute, alongside or as an addendum to the NOTICE
    text from the Work, provided that such additional attribution notices
    cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may
provide additional or different license terms and conditions for use,
reproduction, or distribution of Your modifications, or for any such
Derivative Works as a whole, provided Your use, reproduction, and
distribution of the Work otherwise complies with the conditions stated in
this License.

5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally
submitted for inclusion in the Work by You to the Licensor shall be under
the terms and conditions of this License, without any additional terms or
conditions. Notwithstanding the above, nothing herein shall supersede or
modify the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides
the Work (and each Contributor provides its Contributions) on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions of
TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR
PURPOSE. You are solely responsible for determining the appropriateness of
using or redistributing the Work and assume any risks associated with Your
exercise of permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including
negligence), contract, or otherwise, unless required by applicable law (such
as deliberate and grossly negligent acts) or agreed to in writing, shall any
Contributor be liable to You for damages, including any direct, indirect,
special, incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the Work
(including but not limited to damages for loss of goodwill, work stoppage,
computer failure or malfunction, or any and all other commercial damages or
losses), even if such Contributor has been advised of the possibility of
such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License.
However, in accepting such obligations, You may act only on Your own behalf
and on Your sole responsibility, not on behalf of any other Contributor, and
only if You agree to indemnify, defend, and hold each Contributor harmless
for any liability incurred by, or claims asserted against, such Contributor
by reason of your accepting any such warranty or additional liability.

--- ovn-octavia-provider-2.0.0/PKG-INFO ---

Metadata-Version: 1.2
Name: ovn-octavia-provider
Version: 2.0.0
Summary: OpenStack Octavia integration with OVN
Home-page: https://docs.openstack.org/ovn-octavia-provider/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ===================================================================
        ovn-octavia-provider - OVN Provider driver for Octavia LoadBalancer
        ===================================================================

        OVN provides virtual networking for Open vSwitch and is a component of
        the Open vSwitch project. This project provides integration between
        OpenStack Octavia and OVN.
        * Free software: Apache license
        * Source: https://opendev.org/openstack/ovn-octavia-provider
        * Bugs: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider
        * Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
        * IRC: #openstack-neutron on OFTC.
        * Docs: https://docs.openstack.org/ovn-octavia-provider/latest

        Team and repository tags
        ------------------------

        .. image:: https://governance.openstack.org/tc/badges/ovn-octavia-provider.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        * Release notes for the project can be found at:
          https://docs.openstack.org/releasenotes/ovn-octavia-provider

Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Requires-Python: >=3.6

--- ovn-octavia-provider-2.0.0/README.rst ---

===================================================================
ovn-octavia-provider - OVN Provider driver for Octavia LoadBalancer
===================================================================

OVN provides virtual networking for Open vSwitch and is a component of the
Open vSwitch project. This project provides integration between OpenStack
Octavia and OVN.

* Free software: Apache license
* Source: https://opendev.org/openstack/ovn-octavia-provider
* Bugs: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider
* Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
* IRC: #openstack-neutron on OFTC.
* Docs: https://docs.openstack.org/ovn-octavia-provider/latest

Team and repository tags
------------------------

.. image:: https://governance.openstack.org/tc/badges/ovn-octavia-provider.svg
    :target: https://governance.openstack.org/tc/reference/tags/index.html

* Release notes for the project can be found at:
  https://docs.openstack.org/releasenotes/ovn-octavia-provider

--- ovn-octavia-provider-2.0.0/bindep.txt ---

# This file contains runtime (non-python) dependencies
# More info at: http://docs.openstack.org/infra/bindep/readme.html

python36-devel [platform:rpm]

--- ovn-octavia-provider-2.0.0/devstack/local.conf.sample ---

#
# Sample DevStack local.conf.
#
# This sample file is intended to be used for your typical DevStack environment
# that's running all of OpenStack on a single host.
#
# It will enable the use of OVN as Octavia's Provider driver.
#

[[local|localrc]]

DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
SERVICE_PASSWORD=$ADMIN_PASSWORD

# Logging
# -------

# By default ``stack.sh`` output only goes to the terminal where it runs. It can
# be configured to additionally log to a file by setting ``LOGFILE`` to the full
# path of the destination log file. A timestamp will be appended to the given name.
LOGFILE=$DEST/logs/stack.sh.log

# Old log files are automatically removed after 7 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2

# Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting
# ``LOG_COLOR`` false.
#LOG_COLOR=False

# Enable OVN
Q_AGENT=ovn
Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger
Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve
Q_ML2_TENANT_NETWORK_TYPE="geneve"

# Enable OVN services
enable_service ovn-northd
enable_service ovn-controller
enable_service q-ovn-metadata-agent

# Use Neutron
enable_service q-svc

# Disable Neutron agents not used with OVN.
disable_service q-agt
disable_service q-l3
disable_service q-dhcp
disable_service q-meta

# Enable services, these services depend on neutron plugin.
enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-trunk
enable_service q-dns
#enable_service q-qos

# Enable octavia tempest plugin tests
enable_plugin octavia-tempest-plugin https://opendev.org/openstack/octavia-tempest-plugin

disable_service horizon

# Cinder (OpenStack Block Storage) is disabled by default to speed up
# DevStack a bit. You may enable it here if you would like to use it.
disable_service cinder c-sch c-api c-vol

# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated and saved in the file 'ovn-uuid' for re-use in future
# DevStack runs.
#OVN_UUID=

# If using the OVN native layer-3 service, choose a router scheduler to
# manage the distribution of router gateways on hypervisors/chassis.
# Default value is leastloaded.
#OVN_L3_SCHEDULER=leastloaded

# The DevStack plugin defaults to using the ovn branch from the official ovs
# repo. You can optionally use a different one. For example, you may want to
# use the latest patches in blp's ovn branch (and see OVN_BUILD_FROM_SOURCE):
#OVN_REPO=https://github.com/blp/ovs-reviews.git
#OVN_BRANCH=ovn
# NOTE: When specifying the branch, as shown above, you must also enable this!
# By default, OVN will be installed from packages. In order to build OVN from
# source, set OVN_BUILD_FROM_SOURCE=True
#OVN_BUILD_FROM_SOURCE=False

# If the admin wants to enable this chassis to host gateway routers for
# external connectivity, then set ENABLE_CHASSIS_AS_GW to True.
# Then devstack will set ovn-cms-options with enable-chassis-as-gw
# in Open_vSwitch table's external_ids column.
# If this option is not set on any chassis, all of them with bridge
# mappings configured will be eligible to host a gateway.
ENABLE_CHASSIS_AS_GW=True

# If you wish to use the provider network for public access to the cloud,
# set the following
#Q_USE_PROVIDERNET_FOR_PUBLIC=True

# Create public bridge
OVN_L3_CREATE_PUBLIC_NETWORK=True

# This needs to be equalized with Neutron devstack
PUBLIC_NETWORK_GATEWAY="172.24.4.1"

# Octavia configuration
OCTAVIA_NODE="api"
DISABLE_AMP_IMAGE_BUILD=True
enable_plugin barbican https://opendev.org/openstack/barbican
enable_plugin octavia https://opendev.org/openstack/octavia
enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard
LIBS_FROM_GIT+=python-octaviaclient
enable_service octavia
enable_service o-api
enable_service o-hk
enable_service o-da
disable_service o-cw
disable_service o-hm

# OVN octavia provider plugin
enable_plugin ovn-octavia-provider https://opendev.org/openstack/ovn-octavia-provider

[[post-config|$NOVA_CONF]]
[scheduler]
discover_hosts_in_cells_interval = 2

--- ovn-octavia-provider-2.0.0/devstack/plugin.sh ---

#!/usr/bin/env bash

# devstack plugin for octavia

GET_PIP_CACHE_LOCATION=/opt/stack/cache/files/get-pip.py

# How to connect to ovsdb-server hosting the OVN NB database
if is_service_enabled tls-proxy; then
    OVN_PROTO=ssl
else
    OVN_PROTO=tcp
fi
OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}

function _configure_provider_driver {
    iniset ${OCTAVIA_CONF} api_settings enabled_provider_drivers "${OCTAVIA_PROVIDER_DRIVERS}"
    iniset ${OCTAVIA_CONF} driver_agent enabled_provider_agents ${OCTAVIA_PROVIDER_AGENTS}
    iniset ${OCTAVIA_CONF} ovn ovn_nb_connection "$OVN_NB_REMOTE"
    if is_service_enabled tls-proxy; then
        iniset ${OCTAVIA_CONF} ovn ovn_nb_connection "$OVN_NB_REMOTE"
        iniset ${OCTAVIA_CONF} ovn ovn_nb_ca_cert "$INT_CA_DIR/ca-chain.pem"
        iniset ${OCTAVIA_CONF} ovn ovn_nb_certificate "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
        iniset ${OCTAVIA_CONF} ovn ovn_nb_private_key "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
    fi
}

function is_ovn_enabled {
    if [[ $NEUTRON_AGENT == "ovn" || $Q_AGENT == "ovn" ]]; then
        return 0
    fi
    return 1
}

function _install_provider_driver {
    setup_develop $OVN_OCTAVIA_PROVIDER_DIR
}

if [[ "$1" == "stack" ]]; then
    case "$2" in
        post-config)
            if is_ovn_enabled; then
                _configure_provider_driver
            fi
            ;;
        install)
            if is_ovn_enabled; then
                _install_provider_driver
            fi
            ;;
    esac
fi
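For reference, a sketch of the Octavia configuration fragment that the
``iniset`` calls in ``_configure_provider_driver`` end up writing, assuming
the non-TLS defaults above and the driver/agent lists from
``devstack/settings`` (shown next); the host address is a placeholder:

    [api_settings]
    enabled_provider_drivers = amphora:The Octavia Amphora driver.,octavia:Deprecated alias of the Octavia Amphora driver.,ovn:Octavia OVN driver.

    [driver_agent]
    enabled_provider_agents = ovn

    [ovn]
    ovn_nb_connection = tcp:192.0.2.10:6641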
--- ovn-octavia-provider-2.0.0/devstack/settings ---

OCTAVIA_DIR=${OCTAVIA_DIR:-"${DEST}/octavia"}
OCTAVIA_CONF_DIR=${OCTAVIA_CONF_DIR:-"/etc/octavia"}
OCTAVIA_PROVIDER_DRIVERS=${OCTAVIA_PROVIDER_DRIVERS:-"amphora:The Octavia Amphora driver.,octavia:Deprecated alias of the Octavia Amphora driver.,ovn:Octavia OVN driver."}
OCTAVIA_PROVIDER_AGENTS=${OCTAVIA_PROVIDER_AGENTS:-"ovn"}
OVN_OCTAVIA_PROVIDER_DIR=$DEST/ovn-octavia-provider

--- ovn-octavia-provider-2.0.0/doc/requirements.txt ---

# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
sphinx>=2.0.0,!=2.1.0 # BSD
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
reno>=3.1.0 # Apache-2.0

--- ovn-octavia-provider-2.0.0/doc/source/admin/driver.rst ---

.. _driver:

====================================
OVN as a Provider Driver for Octavia
====================================

Octavia has integrated support for provider drivers, where any third-party
Load Balancer driver can be integrated with Octavia. Functionality related
to this has been developed in OVN, and OVN can now be supported as a
provider driver for Octavia.

The OVN provider driver has a few advantages over Amphora when used as a
provider driver for Octavia:

* OVN can be deployed without VMs, so there is no additional overhead as is
  required currently in Octavia when using the default Amphora driver.

* OVN Load Balancers can be deployed faster than default Load Balancers in
  Octavia (which currently use Amphora), because no additional deployment
  is required.

* Since OVN supports virtual networking for both VMs and containers, OVN as
  a Load Balancer driver can be used successfully with Kuryr Kubernetes[1].

Limitations of the OVN Provider Driver
--------------------------------------

OVN has its own set of limitations when considered as a Load Balancer
driver. These include:

* OVN currently supports TCP, UDP and SCTP, so Layer-7 based load balancing
  is not possible with OVN.

* Currently, the OVN Provider Driver supports a 1:1 protocol mapping between
  Listeners and associated Pools, i.e. a Listener which can handle TCP
  protocols can only be used with pools associated to the TCP protocol.
  Pools handling UDP protocols cannot be linked with TCP based Listeners.
  This limitation will be handled in an upcoming core OVN release.

* IPv6 support is not tested by Tempest.

* Mixed IPv4 and IPv6 members are not supported.

* Only the SOURCE_IP_PORT load balancing algorithm is supported; others,
  like ROUND_ROBIN and LEAST_CONNECTIONS, are not currently supported.

* Octavia flavors are not supported.

Creating an OVN based Load Balancer
-----------------------------------

The OVN provider driver can be tested out on DevStack using the
configuration options in:
.. literalinclude:: ../../../devstack/local.conf.sample

Note that the configuration allows the user to create Load Balancers of both
Amphora and OVN types.

Once the DevStack run is complete, the user can create a load balancer in
OpenStack::

    $ openstack loadbalancer create --vip-network-id public --provider ovn
    +---------------------+--------------------------------------+
    | Field               | Value                                |
    +---------------------+--------------------------------------+
    | admin_state_up      | True                                 |
    | created_at          | 2018-12-13T09:08:14                  |
    | description         |                                      |
    | flavor              |                                      |
    | id                  | 94e7c431-912b-496c-a247-d52875d44ac7 |
    | listeners           |                                      |
    | name                |                                      |
    | operating_status    | OFFLINE                              |
    | pools               |                                      |
    | project_id          | af820b57868c4864957d523fb32ccfba     |
    | provider            | ovn                                  |
    | provisioning_status | PENDING_CREATE                       |
    | updated_at          | None                                 |
    | vip_address         | 172.24.4.9                           |
    | vip_network_id      | ee97665d-69d0-4995-a275-27855359956a |
    | vip_port_id         | c98e52d0-5965-4b22-8a17-a374f4399193 |
    | vip_qos_policy_id   | None                                 |
    | vip_subnet_id       | 3eed0c05-6527-400e-bb80-df6e59d248f1 |
    +---------------------+--------------------------------------+

The user can see the different types of loadbalancers with their associated
providers as below::

    +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
    | id                                   | name | project_id                       | vip_address | provisioning_status | provider |
    +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
    | c5f2070c-d51d-46f0-bec6-dd05e7c19370 |      | af820b57868c4864957d523fb32ccfba | 172.24.4.10 | ACTIVE              | amphora  |
    | 94e7c431-912b-496c-a247-d52875d44ac7 |      | af820b57868c4864957d523fb32ccfba | 172.24.4.9  | ACTIVE              | ovn      |
    +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
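The providers enabled in the deployed Octavia can also be listed directly.
With the DevStack defaults from ``devstack/settings``, the output looks
roughly as below; the descriptions come from the ``enabled_provider_drivers``
option, so they may differ in other deployments::

    $ openstack loadbalancer provider list
    +---------+-------------------------------------------------+
    | name    | description                                     |
    +---------+-------------------------------------------------+
    | amphora | The Octavia Amphora driver.                     |
    | octavia | Deprecated alias of the Octavia Amphora driver. |
    | ovn     | Octavia OVN driver.                             |
    +---------+-------------------------------------------------+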
Now we can see that OVN will show the load balancer in its *loadbalancer*
table::

    $ ovn-nbctl list load_balancer
    _uuid               : c72de15e-5c2e-4c1b-a21b-8e9a6721193c
    external_ids        : {enabled=True,
        lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2",
        ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}",
        "neutron:vip"="172.24.4.9",
        "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193"}
    name                : "94e7c431-912b-496c-a247-d52875d44ac7"
    protocol            : tcp
    vips                : {}

Next, a Listener can be created for the associated Load Balancer::

    $ openstack loadbalancer listener create --protocol TCP --protocol-port \
      64015 94e7c431-912b-496c-a247-d52875d44ac7
    +---------------------------+--------------------------------------+
    | Field                     | Value                                |
    +---------------------------+--------------------------------------+
    | admin_state_up            | True                                 |
    | connection_limit          | -1                                   |
    | created_at                | 2018-12-13T09:14:51                  |
    | default_pool_id           | None                                 |
    | default_tls_container_ref | None                                 |
    | description               |                                      |
    | id                        | 21e77cde-854f-4c3e-bd8c-9536ae0443bc |
    | insert_headers            | None                                 |
    | l7policies                |                                      |
    | loadbalancers             | 94e7c431-912b-496c-a247-d52875d44ac7 |
    | name                      |                                      |
    | operating_status          | OFFLINE                              |
    | project_id                | af820b57868c4864957d523fb32ccfba     |
    | protocol                  | TCP                                  |
    | protocol_port             | 64015                                |
    | provisioning_status       | PENDING_CREATE                       |
    | sni_container_refs        | []                                   |
    | timeout_client_data       | 50000                                |
    | timeout_member_connect    | 5000                                 |
    | timeout_member_data       | 50000                                |
    | timeout_tcp_inspect       | 0                                    |
    | updated_at                | None                                 |
    +---------------------------+--------------------------------------+

OVN updates the Listener information in the Load Balancer table::

    $ ovn-nbctl list load_balancer
    _uuid               : c72de15e-5c2e-4c1b-a21b-8e9a6721193c
    external_ids        : {enabled=True,
        "listener_21e77cde-854f-4c3e-bd8c-9536ae0443bc"="64015:",
        lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2",
        ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}",
        "neutron:vip"="172.24.4.9",
        "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193"}
    name                : "94e7c431-912b-496c-a247-d52875d44ac7"
    protocol            : tcp
    vips                : {}

Next, a Pool is associated with the Listener::

    $ openstack loadbalancer pool create --protocol TCP --lb-algorithm \
      SOURCE_IP_PORT --listener 21e77cde-854f-4c3e-bd8c-9536ae0443bc
    +---------------------+--------------------------------------+
    | Field               | Value                                |
    +---------------------+--------------------------------------+
    | admin_state_up      | True                                 |
    | created_at          | 2018-12-13T09:21:37                  |
    | description         |                                      |
    | healthmonitor_id    |                                      |
    | id                  | 898be8a2-5185-4f3b-8658-a56457f595a9 |
    | lb_algorithm        | SOURCE_IP_PORT                       |
    | listeners           | 21e77cde-854f-4c3e-bd8c-9536ae0443bc |
    | loadbalancers       | 94e7c431-912b-496c-a247-d52875d44ac7 |
    | members             |                                      |
    | name                |                                      |
    | operating_status    | OFFLINE                              |
    | project_id          | af820b57868c4864957d523fb32ccfba     |
    | protocol            | TCP                                  |
    | provisioning_status | PENDING_CREATE                       |
    | session_persistence | None                                 |
    | updated_at          | None                                 |
    +---------------------+--------------------------------------+

OVN's Load Balancer table is modified as below::

    $ ovn-nbctl list load_balancer
    _uuid               : c72de15e-5c2e-4c1b-a21b-8e9a6721193c
    external_ids        : {enabled=True,
        "listener_21e77cde-854f-4c3e-bd8c-9536ae0443bc"="64015:",
        lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2",
        ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}",
        "neutron:vip"="172.24.4.9",
        "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193",
        "pool_898be8a2-5185-4f3b-8658-a56457f595a9"=""}
    name                : "94e7c431-912b-496c-a247-d52875d44ac7"
    protocol            : tcp
    vips                : {}

Lastly, when a member is created, OVN's Load Balancer table is complete::

    $ openstack loadbalancer member create --address 10.10.10.10 \
      --protocol-port 63015 898be8a2-5185-4f3b-8658-a56457f595a9
    +---------------------+--------------------------------------+
    | Field               | Value                                |
    +---------------------+--------------------------------------+
    | address             | 10.10.10.10                          |
    | admin_state_up      | True                                 |
    | created_at          | 2018-12-13T09:26:05                  |
    | id                  | adf55e70-3d50-4e62-99fd-dd77eababb1c |
    | name                |                                      |
    | operating_status    | NO_MONITOR                           |
    | project_id          | af820b57868c4864957d523fb32ccfba     |
    | protocol_port       | 63015                                |
    | provisioning_status | PENDING_CREATE                       |
    | subnet_id           | None                                 |
    | updated_at          | None                                 |
    | weight              | 1                                    |
    | monitor_port        | None                                 |
    | monitor_address     | None                                 |
    | backup              | False                                |
    +---------------------+--------------------------------------+

    $ ovn-nbctl list load_balancer
    _uuid               : c72de15e-5c2e-4c1b-a21b-8e9a6721193c
    external_ids        : {enabled=True,
        "listener_21e77cde-854f-4c3e-bd8c-9536ae0443bc"="64015:pool_898be8a2-5185-4f3b-8658-a56457f595a9",
        lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2",
        ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}",
        "neutron:vip"="172.24.4.9",
        "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193",
        "pool_898be8a2-5185-4f3b-8658-a56457f595a9"="member_adf55e70-3d50-4e62-99fd-dd77eababb1c_10.10.10.10:63015"}
    name                : "94e7c431-912b-496c-a247-d52875d44ac7"
    protocol            : tcp
    vips                : {"172.24.4.9:64015"="10.10.10.10:63015"}
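At this point, traffic sent to the VIP on the listener port is distributed
to the member. As a quick sanity check, assuming a service is actually
listening on the member's port 63015 and the client can reach the VIP
network (the address and port below are the ones used in this walkthrough)::

    $ curl http://172.24.4.9:64015/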
[1]: https://docs.openstack.org/kuryr-kubernetes/latest/installation/services.html

--- ovn-octavia-provider-2.0.0/doc/source/admin/index.rst ---

====================
Administration Guide
====================

.. toctree::
   :maxdepth: 1

   driver

--- ovn-octavia-provider-2.0.0/doc/source/conf.py ---

# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'openstackdocstheme',
    'oslo_config.sphinxext',
    'sphinxcontrib.rsvgconverter',
]

# Project cross-reference roles
openstackdocs_projects = [
    'neutron',
    'octavia',
]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/ovn-octavia-provider'
openstackdocs_pdf_link = True
openstackdocs_bug_project = 'neutron'
openstackdocs_bug_tag = 'ovn-octavia-provider'

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_static_path = ['_static']
html_theme = 'openstackdocs'

# Output file base name for HTML help builder.
htmlhelp_basename = 'ovn-octavia-providerdoc'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('pdf-index', 'doc-ovn-octavia-provider.tex',
     u'OVN Octavia Provider Documentation',
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

--- ovn-octavia-provider-2.0.0/doc/source/configuration/config.rst ---

=======================
Configuration Reference
=======================

This section provides a list of all configuration options for the OVN
Octavia provider. They are auto-generated from the OVN Octavia provider code
when this documentation is built.

The configuration file names used below are the ones commonly used, but
there is no restriction on the file name: arbitrary names can be used.

.. show-options::
   :config-file: etc/oslo-config-generator/ovn.conf

--- ovn-octavia-provider-2.0.0/doc/source/configuration/index.rst ---

.. _configuring:

===================
Configuration Guide
===================

.. toctree::
   :maxdepth: 1

   config

--- ovn-octavia-provider-2.0.0/doc/source/contributor/index.rst ---

=========================
Contributor Documentation
=========================

.. toctree::
   :maxdepth: 2

   loadbalancer

--- ovn-octavia-provider-2.0.0/doc/source/contributor/loadbalancer.rst ---

.. _loadbalancer:

==================================
OpenStack LoadBalancer API and OVN
==================================

Introduction
------------

Load balancing is essential for enabling simple or automatic delivery
scaling and availability, since application delivery, scaling and
availability are considered vital features of any cloud. Octavia is an open
source, operator-scale load balancing solution designed to work with
OpenStack.

The purpose of this document is to propose a design for how we can use OVN
as the backend for OpenStack's LoadBalancer API provided by Octavia.

Octavia LoadBalancers Today
---------------------------

A detailed design analysis of Octavia is available here:

https://docs.openstack.org/octavia/latest/contributor/design/version0.5/component-design.html

Currently, Octavia uses the built-in Amphorae driver to fulfill load
balancing requests in OpenStack. An amphora can be a virtual machine,
container, dedicated hardware, appliance or device that actually performs
the task of load balancing in the Octavia system. More specifically, an
amphora takes requests from clients on the front-end and distributes these
to back-end systems. Amphorae communicate with their controllers over the
LoadBalancer's network through a driver interface on the controller.
An amphora needs a placeholder, such as a separate VM or container, for
deployment, so that it can handle the LoadBalancer's requests. Along with
this, it also needs a separate network (termed the lb-mgmt-network) which
handles all Amphorae requests.

Amphorae have the capability to handle L4 (TCP/UDP) as well as L7 (HTTP)
LoadBalancer requests and provide monitoring features using HealthMonitors.

Octavia with OVN
----------------

The OVN native LoadBalancer currently supports L4 protocols, with support
for L7 protocols aimed at future releases. It does not need any extra
hardware, VM or container for deployment, which is a major advantage when
compared with Amphorae. Also, it does not need any special network to handle
the LoadBalancer's requests, as they are handled directly by OpenFlow rules.
And, though OVN does not have support for TLS, it is in development and once
implemented can be integrated with Octavia.

The following section details how OVN can be used as an Octavia driver.

Overview of Proposed Approach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The OVN Driver for Octavia runs under the scope of Octavia. The Octavia API
receives and forwards calls to the OVN Driver.

**Step 1** - Creating a LoadBalancer

The Octavia API receives and issues a LoadBalancer creation request on a
network to the OVN Provider driver. The OVN driver creates a LoadBalancer in
the OVN NorthBound DB and asynchronously updates the Octavia DB with the
status response. A VIP port is created in Neutron when the LoadBalancer
creation is complete. The VIP information however is not updated in the
NorthBound DB until the Members are associated with the LoadBalancer's Pool.

**Step 2** - Creating LoadBalancer entities (Pools, Listeners, Members)

Once a LoadBalancer is created by OVN in its NorthBound DB, users can create
Pools, Listeners and Members associated with the LoadBalancer using the
Octavia API. With the creation of each entity, the LoadBalancer's
*external_ids* column in the NorthBound DB will be updated and corresponding
Logical and OpenFlow rules will be added for handling them.

**Step 3** - LoadBalancer request processing

When a user sends a request to the VIP IP address, the OVN pipeline takes
care of load balancing the VIP request to one of the backend members. More
information about this can be found in the ovn-northd man pages.

OVN LoadBalancer Driver Logic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* On startup: Open and maintain a connection to the OVN Northbound DB (using
  the ovsdbapp library; see the sketch after this list). On first
  connection, and anytime a reconnect happens:

  * Do a full sync.

* Register a callback when a new interface is added or deleted from a router
  or switch. The LogicalSwitchPortUpdateEvent and LogicalRouterPortEvent are
  registered to process these events.

* When a new LoadBalancer L1 is created, create a Row in OVN's
  ``Load_Balancer`` table and update its entries for name and network
  references. If the network on which the LoadBalancer is created is
  associated with a router, say R1, then add the router reference to the
  LoadBalancer's *external_ids* and associate the LoadBalancer to the
  router. Also associate the LoadBalancer L1 with all those networks which
  have an interface on the router R1. This is required so that Logical Flows
  for inter-network communication while using the LoadBalancer L1 are
  possible. Also, during this time, a new port is created via Neutron which
  acts as a VIP Port. The information of this new port is not visible in
  OVN's NorthBound DB until a member is added to the LoadBalancer.
* If a new network interface is added to the router R1 described above, all
  the LoadBalancers on that network are associated with the router R1 and
  all the LoadBalancers on the router are associated with the new network.

* If a network interface is removed from the router R1, then all the
  LoadBalancers which have been solely created on that network (identified
  using the *ls_ref* attribute in the LoadBalancer's *external_ids*) are
  removed from the router. Similarly, those LoadBalancers which are
  associated with the network but not actually created on that network are
  removed from the network.

* A LoadBalancer can either be deleted with all its child entities using the
  *cascade* option, or its members/pools/listeners can be individually
  deleted. When the LoadBalancer is deleted, its references and associations
  from all networks and routers are removed. This might change in the future
  once the association of LoadBalancers with networks/routers is changed
  from *strong* to *weak* [3]. Also, the VIP port is deleted when the
  LoadBalancer is deleted.
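A minimal sketch of the startup flow described above, using the ovsdbapp
library. This is illustrative only, not the driver's actual code: the
connection string, timeout, ``NbIdl`` wiring and the event's ``run()`` body
are assumptions made for the example::

    from ovs.db import idl
    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.backend.ovs_idl import event as row_event
    from ovsdbapp.backend.ovs_idl import idlutils
    from ovsdbapp.schema.ovn_northbound import impl_idl


    class LogicalSwitchPortUpdateEvent(row_event.RowEvent):
        """Simplified stand-in for the driver's LSP event class."""

        def __init__(self):
            # Match updates to any Logical_Switch_Port row.
            super().__init__((self.ROW_UPDATE,), 'Logical_Switch_Port', None)

        def run(self, event, row, old):
            # The real driver inspects the row here and updates the
            # load balancer associations accordingly.
            print('Logical_Switch_Port updated: %s' % row.name)


    class NbIdl(idl.Idl):
        """Idl subclass that forwards table updates to a RowEventHandler."""

        def __init__(self, remote, schema_helper):
            super().__init__(remote, schema_helper)
            self.notify_handler = row_event.RowEventHandler()

        def notify(self, event, row, updates=None):
            self.notify_handler.notify(event, row, updates)


    conn_string = 'tcp:127.0.0.1:6641'  # assumed NB DB location
    helper = idlutils.get_schema_helper(conn_string, 'OVN_Northbound')
    helper.register_all()
    nb_idl = NbIdl(conn_string, helper)
    # Watch for port events, as the driver logic above requires.
    nb_idl.notify_handler.watch_event(LogicalSwitchPortUpdateEvent())
    # Connection runs the Idl main loop in a thread; OvnNbApiIdlImpl exposes
    # the NB API (e.g. lb_add/lb_del commands) on top of it.
    nb_api = impl_idl.OvnNbApiIdlImpl(
        connection.Connection(nb_idl, timeout=180))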
OVN LoadBalancer at work
~~~~~~~~~~~~~~~~~~~~~~~~

The OVN Northbound schema [5] has a table to store LoadBalancers. The table
looks like::

    "Load_Balancer": {
        "columns": {
            "name": {"type": "string"},
            "vips": {
                "type": {"key": "string", "value": "string",
                         "min": 0, "max": "unlimited"}},
            "protocol": {
                "type": {"key": {"type": "string",
                                 "enum": ["set", ["tcp", "udp"]]},
                         "min": 0, "max": 1}},
            "external_ids": {
                "type": {"key": "string", "value": "string",
                         "min": 0, "max": "unlimited"}}},
        "isRoot": true},

There is a ``load_balancer`` column in the Logical_Switch table (which
corresponds to a Neutron network) as well as the Logical_Router table (which
corresponds to a Neutron router) referring back to the 'Load_Balancer'
table.

The OVN driver updates the OVN Northbound DB. When a LoadBalancer is
created, a row in this table is created. When the listeners and members are
added, the 'vips' column and the Logical_Switch's ``load_balancer`` column
are updated accordingly.

The ovn-northd service, which monitors for changes to the OVN Northbound DB,
generates OVN logical flows to enable load balancing, and ovn-controller
running on each compute node translates the logical flows into actual
OpenFlow rules.

The status of each entity in the Octavia DB is managed according to [4].

Below are a few examples on what happens when LoadBalancer commands are
executed and what changes in the Load_Balancer Northbound DB table.

1. Create a LoadBalancer::

    $ openstack loadbalancer create --provider ovn --vip-subnet-id=private lb1

    $ ovn-nbctl list load_balancer
    _uuid         : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids  : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 1}",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name          : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol      : []
    vips          : {}

2. Create a pool::

    $ openstack loadbalancer pool create --name p1 --loadbalancer lb1 --protocol TCP --lb-algorithm SOURCE_IP_PORT

    $ ovn-nbctl list load_balancer
    _uuid         : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids  : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 1}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"="",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name          : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol      : []
    vips          : {}

3. Create a member::

    $ openstack loadbalancer member create --address 10.0.0.107 --subnet-id 2d54ec67-c589-473b-bc67-41f3d1331fef --protocol-port 80 p1

    $ ovn-nbctl list load_balancer
    _uuid         : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids  : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"=
            "member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name          : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol      : []
    vips          : {}

4. Create another member::

    $ openstack loadbalancer member create --address 20.0.0.107 --subnet-id c2e2da10-1217-4fe2-837a-1c45da587df7 --protocol-port 80 p1

    $ ovn-nbctl list load_balancer
    _uuid         : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids  : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2,
                  \"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef\": 1}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"=
            "member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80,
             member_d100f2ed-9b55-4083-be78-7f203d095561_20.0.0.107:80",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name          : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol      : []
    vips          : {}

5. Create a listener::

    $ openstack loadbalancer listener create --name l1 --protocol TCP --protocol-port 82 --default-pool p1 lb1

    $ ovn-nbctl list load_balancer
    _uuid         : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids  : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2,
                  \"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef\": 1}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"="10.0.0.107:80,20.0.0.107:80",
        "listener_12345678-2501-43f2-b34e-38a9cb7e4132"=
            "82:pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name          : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol      : []
    vips          : {"10.0.0.10:82"="10.0.0.107:80,20.0.0.107:80"}

As explained earlier in the design section:

- If a network N1 has a LoadBalancer LB1 associated to it and one of its
  interfaces is added to a router R1, LB1 is associated with R1 as well.

- If a network N2 has a LoadBalancer LB2 and one of its interfaces is added
  to the router R1, then R1 will have both LoadBalancers LB1 and LB2. N1 and
  N2 will also have both the LoadBalancers associated to them. However,
  kindly note that although network N1 would have both LB1 and LB2
  LoadBalancers associated with it, only LB1 would be the LoadBalancer which
  has a direct reference to the network N1, since LB1 was created on N1.
  This is visible in the ``ls_ref`` key of the ``external_ids`` column in
  LB1's entry in the ``load_balancer`` table.

- If a network N3 is added to the router R1, N3 will also have both
  LoadBalancers (LB1, LB2) associated to it.

- If the interface to network N2 is removed from R1, network N2 will now
  only have LB2 associated with it. Networks N1 and N3 and router R1 will
  have LoadBalancer LB1 associated with them.
Limitations ----------- The following actions are not supported by the OVN Provider Driver: - Creating a LoadBalancer/Listener/Pool with an L7 Protocol - Currently only one algorithm is supported for pool management (Source IP Port) The following issue exists with OVN's integration with Octavia: - If creation/deletion of a LoadBalancer, Listener, Pool or Member fails, then the corresponding object will remain in the DB in a PENDING_* state. Support Matrix -------------- A detailed matrix of the operations supported by the OVN Provider driver in Octavia can be found at https://docs.openstack.org/octavia/latest/user/feature-classification/index.html Other References ---------------- [1] Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/ [2] Octavia Glossary: https://docs.openstack.org/octavia/latest/reference/glossary.html [3] https://github.com/openvswitch/ovs/commit/612f80fa8ebf88dad2e204364c6c02b451dca36c [4] https://docs.openstack.org/api-ref/load-balancer/v2/index.html#status-codes [5] https://github.com/openvswitch/ovs/blob/d1b235d7a6246e00d4afc359071d3b6b3ed244c3/ovn/ovn-nb.ovsschema#L117 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/doc/source/index.rst0000664000175000017500000000240100000000000021515 0ustar00zuulzuul00000000000000.. Copyright 2011-2020 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to the OVN Octavia provider driver's documentation! =========================================================== .. We use different index pages for HTML and PDF documents for better TOC. Please remember to update pdf-index.rst when you update the index below. Contents -------- .. toctree:: :maxdepth: 2 admin/index contributor/index configuration/index Search ------ * :ref:`OVN Octavia provider driver document search <search>`: Search the contents of this document. * `OpenStack wide search <https://docs.openstack.org>`_: Search the wider set of OpenStack documentation, including forums. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/doc/source/pdf-index.rst0000664000175000017500000000133100000000000022265 0ustar00zuulzuul00000000000000:orphan: .. Copyright 2011- OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ..
toctree:: :maxdepth: 2 admin/index contributor/index ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4365885 ovn-octavia-provider-2.0.0/etc/0000775000175000017500000000000000000000000016365 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4485884 ovn-octavia-provider-2.0.0/etc/octavia/0000775000175000017500000000000000000000000020013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/etc/octavia/.placeholder0000664000175000017500000000000000000000000022264 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4485884 ovn-octavia-provider-2.0.0/etc/octavia/conf.d/0000775000175000017500000000000000000000000021162 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/etc/octavia/conf.d/.placeholder0000664000175000017500000000000000000000000023433 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4485884 ovn-octavia-provider-2.0.0/etc/oslo-config-generator/0000775000175000017500000000000000000000000022570 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/etc/oslo-config-generator/ovn.conf0000664000175000017500000000016000000000000024236 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/octavia/conf.d/ovn.conf.sample wrap_width = 79 namespace = octavia.api.drivers.ovn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/lower-constraints.txt0000664000175000017500000000476500000000000022064 0ustar00zuulzuul00000000000000alabaster==0.7.10 alembic==1.6.5 amqp==5.0.2 appdirs==1.4.3 astroid==2.4.0 Babel==2.9.0 bandit==1.1.0 beautifulsoup4==4.6.0 cachetools==2.0.0 cffi==1.14.0 chardet==3.0.4 cliff==2.8.0 cmd2==0.8.0 contextlib2==0.5.5 coverage==4.0 cryptography==3.2 debtcollector==1.19.0 decorator==4.0.0 deprecation==1.0 docutils==0.11 dogpile.cache==0.8.0 entrypoints==0.3 enum-compat==0.0.3 eventlet==0.22.1 extras==1.0.0 fasteners==0.7.0 fixtures==3.0.0 flake8==3.7.9 flake8-import-order==0.12 future==0.16.0 futurist==1.2.0 gitdb==4.0.5 gitdb2==2.0.3 GitPython==3.1.11 greenlet==0.4.15 hacking==3.0.1 httplib2==0.18.1 idna==2.6 iso8601==0.1.11 isort==4.3.21 Jinja2==2.10 jmespath==0.9.0 jsonpatch==1.16 jsonpointer==1.13 jsonschema==2.6.0 keystoneauth1==3.14.0 keystonemiddleware==5.1.0 kombu==5.0.2 linecache2==1.0.0 logutils==0.3.5 Mako==1.0.7 MarkupSafe==1.1.1 mccabe==0.6.1 mock==4.0.2 monotonic==1.4 mox3==1.1.0 msgpack==1.0.0 msgpack-python==0.4.0 munch==2.1.0 netaddr==0.7.18 netifaces==0.10.4 neutron==18.0.0 neutron-lib==2.16.0 octavia-lib==2.2.0 openstacksdk==0.31.2 os-client-config==1.28.0 os-service-types==1.7.0 os-traits==2.4.0 os-vif==2.3.0 os-xenapi==0.3.4 osc-lib==1.8.0 oslo.cache==1.26.0 oslo.concurrency==3.26.0 oslo.config==8.0.0 oslo.context==2.22.0 oslo.db==8.5.0 oslo.i18n==3.20.0 oslo.log==4.3.0 oslo.messaging==12.4.0 oslo.middleware==3.31.0 oslo.policy==3.7.0 oslo.privsep==2.3.0 oslo.reports==1.18.0 oslo.rootwrap==5.8.0 oslo.serialization==2.28.1 
oslo.service==1.31.0 oslo.upgradecheck==1.3.0 oslo.utils==4.5.0 oslo.versionedobjects==1.35.1 oslotest==3.2.0 osprofiler==2.3.0 ovs==2.10.0 ovsdbapp==1.7.0 Paste==2.0.2 PasteDeploy==1.5.0 pbr==4.0.0 pecan==1.3.2 pika==0.10.0 pika-pool==0.1.3 prettytable==0.7.2 psutil==5.3.0 pycadf==1.1.0 pycparser==2.18 pyflakes==2.1.1 Pygments==2.2.0 pyinotify==0.9.6 pylint==2.6.0 pyparsing==2.1.0 pyperclip==1.5.27 pyroute2==0.5.13 python-dateutil==2.7.0 python-designateclient==2.7.0 python-editor==1.0.3 python-keystoneclient==3.8.0 python-mimeparse==1.6.0 python-neutronclient==6.7.0 python-novaclient==9.1.0 python-subunit==1.0.0 pytz==2015.7 PyYAML==5.3.1 reno==3.1.0 repoze.lru==0.7 requests==2.23.0 requestsexceptions==1.2.0 rfc3986==1.2.0 Routes==2.3.1 simplejson==3.5.1 snowballstemmer==1.2.1 SQLAlchemy==1.4.23 sqlalchemy-migrate==0.11.0 sqlparse==0.2.2 statsd==3.2.1 stestr==1.0.0 Tempita==0.5.2 tenacity==6.0.0 testresources==2.0.0 testscenarios==0.4 testtools==2.2.0 tinyrpc==0.6 traceback2==1.4.0 unittest2==1.1.0 vine==5.0.0 waitress==1.1.0 WebOb==1.8.2 WebTest==2.0.27 wrapt==1.12.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4485884 ovn-octavia-provider-2.0.0/ovn_octavia_provider/0000775000175000017500000000000000000000000022034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/__init__.py0000664000175000017500000000000000000000000024133 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/agent.py0000664000175000017500000000377200000000000023515 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider import event as ovn_event from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.ovsdb import impl_idl_ovn ovn_conf.register_opts() LOG = logging.getLogger(__name__) OVN_EVENT_LOCK_NAME = "neutron_ovn_octavia_event_lock" def OvnProviderAgent(exit_event): helper = ovn_helper.OvnProviderHelper() events = [ovn_event.LogicalRouterPortEvent(helper), ovn_event.LogicalSwitchPortUpdateEvent(helper)] sb_events = [ovn_event.ServiceMonitorUpdateEvent(helper)] # NOTE(mjozefcz): This API is only for handling OVSDB events! 
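# The 'events' list above is watched on the Northbound connection created
# below (router-port and switch-port changes), while 'sb_events' is watched
# on the Southbound connection (Service_Monitor updates for health checks).
# Both connections request the same OVSDB lock (OVN_EVENT_LOCK_NAME), which
# in ovsdbapp normally means only the agent instance holding the lock
# processes the events.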
ovn_nb_idl_for_events = impl_idl_ovn.OvnNbIdlForLb( event_lock_name=OVN_EVENT_LOCK_NAME) ovn_nb_idl_for_events.notify_handler.watch_events(events) ovn_nb_idl_for_events.start() ovn_sb_idl_for_events = impl_idl_ovn.OvnSbIdlForLb( event_lock_name=OVN_EVENT_LOCK_NAME) ovn_sb_idl_for_events.notify_handler.watch_events(sb_events) ovn_sb_idl_for_events.start() LOG.info('OVN provider agent has started.') exit_event.wait() LOG.info('OVN provider agent is exiting.') ovn_nb_idl_for_events.notify_handler.unwatch_events(events) ovn_nb_idl_for_events.stop() ovn_sb_idl_for_events.notify_handler.unwatch_events(sb_events) ovn_sb_idl_for_events.stop() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4525883 ovn-octavia-provider-2.0.0/ovn_octavia_provider/common/0000775000175000017500000000000000000000000023324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/common/clients.py0000664000175000017500000001025700000000000025344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from neutronclient.common import exceptions as n_exc from neutronclient.neutron import client as neutron_client from octavia_lib.api.drivers import exceptions as driver_exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from ovn_octavia_provider.common import constants from ovn_octavia_provider.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF NEUTRON_VERSION = '2.0' class KeystoneSession(): def __init__(self, section=constants.SERVICE_AUTH): self._session = None self._auth = None self.section = section ks_loading.register_auth_conf_options(cfg.CONF, self.section) ks_loading.register_session_conf_options(cfg.CONF, self.section) @property def session(self): """Initialize a Keystone session. :return: a Keystone Session object """ if not self._session: self._session = ks_loading.load_session_from_conf_options( cfg.CONF, self.section, auth=self.auth) return self._session @property def auth(self): if not self._auth: self._auth = ks_loading.load_auth_from_conf_options( cfg.CONF, self.section) return self._auth class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class NeutronAuth(metaclass=Singleton): def __init__(self, region, service_name=None, endpoint=None, endpoint_type='publicURL', insecure=False, ca_cert=None): """Create neutron client object. :param region: The region of the service :param service_name: The name of the neutron service in the catalog :param endpoint: The endpoint of the service :param endpoint_type: The endpoint_type of the service :param insecure: Turn off certificate validation :param ca_cert: CA Cert file path :return: a Neutron Client object. 
:raises Exception: if the client cannot be created """ ksession = KeystoneSession() kwargs = {'region_name': region, 'session': ksession.session, 'endpoint_type': endpoint_type, 'insecure': insecure} if service_name: kwargs['service_name'] = service_name if endpoint: kwargs['endpoint_override'] = endpoint if ca_cert: kwargs['ca_cert'] = ca_cert try: self.neutron_client = neutron_client.Client( NEUTRON_VERSION, **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Neutron client.") def get_neutron_client(): try: return NeutronAuth( endpoint=CONF.neutron.endpoint, region=CONF.neutron.region_name, endpoint_type=CONF.neutron.endpoint_type, service_name=CONF.neutron.service_name, insecure=CONF.neutron.insecure, ca_cert=CONF.neutron.ca_certificates_file, ).neutron_client except n_exc.NeutronClientException as e: msg = _('Cannot initialize Neutron Client. Exception: %s. ' 'Please verify Neutron service configuration ' 'in Octavia API configuration.') % e raise driver_exceptions.DriverError( operator_fault_string=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/common/config.py0000664000175000017500000001324600000000000025151 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from oslo_log import log as logging from ovn_octavia_provider.i18n import _ LOG = logging.getLogger(__name__) ovn_opts = [ cfg.StrOpt('ovn_nb_connection', default='tcp:127.0.0.1:6641', help=_('The connection string for the OVN_Northbound OVSDB.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use ssl:IP:PORT for SSL connection. The ' 'ovn_nb_private_key, ovn_nb_certificate and ' 'ovn_nb_ca_cert are mandatory.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.StrOpt('ovn_nb_private_key', default='', help=_('The PEM file with private key for SSL connection to ' 'OVN-NB-DB')), cfg.StrOpt('ovn_nb_certificate', default='', help=_('The PEM file with certificate that certifies the ' 'private key specified in ovn_nb_private_key')), cfg.StrOpt('ovn_nb_ca_cert', default='', help=_('The PEM file with CA certificate that OVN should use to' ' verify certificates presented to it by SSL peers')), cfg.StrOpt('ovn_sb_connection', default='tcp:127.0.0.1:6642', help=_('The connection string for the OVN_Southbound OVSDB.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use ssl:IP:PORT for SSL connection.
The ' 'ovn_sb_private_key, ovn_sb_certificate and ' 'ovn_sb_ca_cert are mandatory.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.StrOpt('ovn_sb_private_key', default='', help=_('The PEM file with private key for SSL connection to ' 'OVN-SB-DB')), cfg.StrOpt('ovn_sb_certificate', default='', help=_('The PEM file with certificate that certifies the ' 'private key specified in ovn_sb_private_key')), cfg.StrOpt('ovn_sb_ca_cert', default='', help=_('The PEM file with CA certificate that OVN should use to' ' verify certificates presented to it by SSL peers')), cfg.IntOpt('ovsdb_connection_timeout', default=180, help=_('Timeout in seconds for the OVSDB ' 'connection transaction')), cfg.IntOpt('ovsdb_retry_max_interval', default=180, help=_('Max interval in seconds between ' 'each retry to get the OVN NB and SB IDLs')), cfg.IntOpt('ovsdb_probe_interval', min=0, default=60000, help=_('The probe interval for the OVSDB session in ' 'milliseconds. If this is zero, it disables the ' 'connection keepalive feature. If non-zero the value ' 'will be forced to at least 1000 milliseconds. Defaults ' 'to 60 seconds.')), ] neutron_opts = [ cfg.StrOpt('service_name', help=_('The name of the neutron service in the ' 'keystone catalog')), cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.')), cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack services.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Endpoint interface in identity service to use')), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path')), cfg.BoolOpt('insecure', default=False, help=_('Disable certificate validation on SSL connections')), ] def register_opts(): cfg.CONF.register_opts(ovn_opts, group='ovn') cfg.CONF.register_opts(neutron_opts, group='neutron') ks_loading.register_auth_conf_options(cfg.CONF, 'service_auth') ks_loading.register_session_conf_options(cfg.CONF, 'service_auth') def list_opts(): return [ ('ovn', ovn_opts), ('neutron', neutron_opts), ] def get_ovn_nb_connection(): return cfg.CONF.ovn.ovn_nb_connection def get_ovn_nb_private_key(): return cfg.CONF.ovn.ovn_nb_private_key def get_ovn_nb_certificate(): return cfg.CONF.ovn.ovn_nb_certificate def get_ovn_nb_ca_cert(): return cfg.CONF.ovn.ovn_nb_ca_cert def get_ovn_sb_connection(): return cfg.CONF.ovn.ovn_sb_connection def get_ovn_sb_private_key(): return cfg.CONF.ovn.ovn_sb_private_key def get_ovn_sb_certificate(): return cfg.CONF.ovn.ovn_sb_certificate def get_ovn_sb_ca_cert(): return cfg.CONF.ovn.ovn_sb_ca_cert def get_ovn_ovsdb_timeout(): return cfg.CONF.ovn.ovsdb_connection_timeout def get_ovn_ovsdb_retry_max_interval(): return cfg.CONF.ovn.ovsdb_retry_max_interval def get_ovn_ovsdb_probe_interval(): return cfg.CONF.ovn.ovsdb_probe_interval ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/common/constants.py0000664000175000017500000000750500000000000025721 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia_lib.common import constants # TODO(mjozefcz): Use those variables from neutron-lib once released. LRP_PREFIX = "lrp-" LB_VIP_PORT_PREFIX = "ovn-lb-vip-" OVN_PORT_NAME_EXT_ID_KEY = 'neutron:port_name' OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name' OVN_PORT_FIP_EXT_ID_KEY = 'neutron:port_fip' OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id' OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids' OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name' OVN_SG_IDS_EXT_ID_KEY = 'neutron:security_group_ids' OVN_DEVICE_OWNER_EXT_ID_KEY = 'neutron:device_owner' OVN_FIP_EXT_ID_KEY = 'neutron:fip_id' OVN_FIP_PORT_EXT_ID_KEY = 'neutron:fip_port_id' OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id' OVN_PORT_CIDR_EXT_ID_KEY = 'neutron:cidrs' LB_EXT_IDS_LS_REFS_KEY = 'ls_refs' LB_EXT_IDS_LR_REF_KEY = 'lr_ref' LB_EXT_IDS_POOL_PREFIX = 'pool_' LB_EXT_IDS_LISTENER_PREFIX = 'listener_' LB_EXT_IDS_MEMBER_PREFIX = 'member_' LB_EXT_IDS_HM_KEY = 'octavia:healthmonitor' LB_EXT_IDS_VIP_KEY = 'neutron:vip' LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip' LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id' PORT_FORWARDING_PLUGIN = 'port_forwarding_plugin' # Auth sections SERVICE_AUTH = 'service_auth' # Request type constants REQ_TYPE_LB_CREATE = 'lb_create' REQ_TYPE_LB_DELETE = 'lb_delete' REQ_TYPE_LB_UPDATE = 'lb_update' REQ_TYPE_LISTENER_CREATE = 'listener_create' REQ_TYPE_LISTENER_DELETE = 'listener_delete' REQ_TYPE_LISTENER_UPDATE = 'listener_update' REQ_TYPE_POOL_CREATE = 'pool_create' REQ_TYPE_POOL_DELETE = 'pool_delete' REQ_TYPE_POOL_UPDATE = 'pool_update' REQ_TYPE_MEMBER_CREATE = 'member_create' REQ_TYPE_MEMBER_DELETE = 'member_delete' REQ_TYPE_MEMBER_UPDATE = 'member_update' REQ_TYPE_LB_CREATE_LRP_ASSOC = 'lb_create_lrp_assoc' REQ_TYPE_LB_DELETE_LRP_ASSOC = 'lb_delete_lrp_assoc' REQ_TYPE_HANDLE_VIP_FIP = 'handle_vip_fip' REQ_TYPE_HANDLE_MEMBER_DVR = 'handle_member_dvr' REQ_TYPE_HM_CREATE = 'hm_create' REQ_TYPE_HM_UPDATE = 'hm_update' REQ_TYPE_HM_DELETE = 'hm_delete' REQ_TYPE_HM_UPDATE_EVENT = 'hm_update_event' REQ_TYPE_EXIT = 'exit' # Request information constants REQ_INFO_ACTION_ASSOCIATE = 'associate' REQ_INFO_ACTION_DISASSOCIATE = 'disassociate' REQ_INFO_MEMBER_ADDED = 'member_added' REQ_INFO_MEMBER_DELETED = 'member_deleted' # Disabled resources have a ':D' at the end DISABLED_RESOURCE_SUFFIX = 'D' # This driver only supports TCP, UDP and SCTP, with a single LB algorithm OVN_NATIVE_LB_PROTOCOLS = [constants.PROTOCOL_TCP, constants.PROTOCOL_UDP, constants.PROTOCOL_SCTP, ] OVN_NATIVE_LB_ALGORITHMS = [constants.LB_ALGORITHM_SOURCE_IP_PORT, ] # This driver only supports UDP Connect and TCP health monitors SUPPORTED_HEALTH_MONITOR_TYPES = [constants.HEALTH_MONITOR_UDP_CONNECT, constants.HEALTH_MONITOR_TCP] # Prepended to exception log messages EXCEPTION_MSG = "Exception occurred during %s" # Used in functional tests LR_REF_KEY_HEADER = 'neutron-' # LB selection fields to represent LB algorithm LB_SELECTION_FIELDS_MAP = { constants.LB_ALGORITHM_SOURCE_IP_PORT: ["ip_dst", "ip_src", "tp_dst", "tp_src"], constants.LB_ALGORITHM_SOURCE_IP: ["ip_src", "ip_dst"], None: ["ip_src",
"ip_dst", "tp_src", "tp_dst"], } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/common/exceptions.py0000664000175000017500000000243100000000000026057 0ustar00zuulzuul00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from octavia_lib.api.drivers import exceptions as driver_exceptions from ovn_octavia_provider.i18n import _ class RevisionConflict(n_exc.NeutronException): message = _('OVN revision number for %(resource_id)s (type: ' '%(resource_type)s) is equal or higher than the given ' 'resource. Skipping update') class IPVersionsMixingNotSupportedError( driver_exceptions.UnsupportedOptionError): user_fault_string = _('OVN provider does not support mixing IPv4/IPv6 ' 'configuration within the same Load Balancer.') operator_fault_string = user_fault_string ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/common/utils.py0000664000175000017500000000376400000000000025050 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import netutils from ovn_octavia_provider.common import constants def ovn_name(id): # The name of the OVN entry will be neutron- # This is due to the fact that the OVN application checks if the name # is a UUID. If so then there will be no matches. # We prefix the UUID to enable us to use the Neutron UUID when # updating, deleting etc. return 'neutron-%s' % id def ovn_lrouter_port_name(id): # The name of the OVN lrouter port entry will be lrp- # This is to distinguish with the name of the connected lswitch patch port, # which is named with neutron port uuid, so that OVS patch ports are # generated properly. The pairing patch port names will be: # - patch-lrp--to- # - patch--to-lrp- # lrp stands for Logical Router Port return constants.LRP_PREFIX + '%s' % id def remove_macs_from_lsp_addresses(addresses): """Remove the mac addreses from the Logical_Switch_Port addresses column. :param addresses: The list of addresses from the Logical_Switch_Port. 
Example: ["80:fa:5b:06:72:b7 158.36.44.22", "ff:ff:ff:ff:ff:ff 10.0.0.2"] :returns: A list of IP addesses (v4 and v6) """ ip_list = [] for addr in addresses: ip_list.extend([x for x in addr.split() if (netutils.is_valid_ipv4(x) or netutils.is_valid_ipv6(x))]) return ip_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/driver.py0000664000175000017500000005143300000000000023707 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr from octavia_lib.api.drivers import data_models as o_datamodels from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.api.drivers import provider_base as driver_base from octavia_lib.common import constants from oslo_log import log as logging from ovn_octavia_provider.common import config as ovn_conf # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.common import exceptions as ovn_exc from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.i18n import _ ovn_conf.register_opts() LOG = logging.getLogger(__name__) class OvnProviderDriver(driver_base.ProviderDriver): def __init__(self): super().__init__() self._ovn_helper = ovn_helper.OvnProviderHelper() def __del__(self): self._ovn_helper.shutdown() def _is_health_check_supported(self): return self._ovn_helper.ovn_nbdb_api.is_col_present( 'Load_Balancer', 'health_check') def _check_for_supported_protocols(self, protocol): if protocol not in ovn_const.OVN_NATIVE_LB_PROTOCOLS: msg = _('OVN provider does not support %s protocol') % protocol raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _check_for_supported_algorithms(self, algorithm): if algorithm not in ovn_const.OVN_NATIVE_LB_ALGORITHMS: msg = _('OVN provider does not support %s algorithm') % algorithm raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _check_for_allowed_cidrs(self, allowed_cidrs): # TODO(haleyb): add support for this if isinstance(allowed_cidrs, o_datamodels.UnsetType): allowed_cidrs = [] if allowed_cidrs: msg = _('OVN provider does not support allowed_cidrs option') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def loadbalancer_create(self, loadbalancer): admin_state_up = loadbalancer.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': loadbalancer.loadbalancer_id, 'vip_address': loadbalancer.vip_address, 'vip_network_id': loadbalancer.vip_network_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': request_info} self._ovn_helper.add_request(request) if not isinstance(loadbalancer.listeners, o_datamodels.UnsetType): for 
listener in loadbalancer.listeners: self.listener_create(listener) if not isinstance(loadbalancer.pools, o_datamodels.UnsetType): for pool in loadbalancer.pools: self.pool_create(pool) for member in pool.members: if not member.subnet_id: member.subnet_id = loadbalancer.vip_network_id self.member_create(member) def loadbalancer_delete(self, loadbalancer, cascade=False): request_info = {'id': loadbalancer.loadbalancer_id, 'cascade': cascade} request = {'type': ovn_const.REQ_TYPE_LB_DELETE, 'info': request_info} self._ovn_helper.add_request(request) def loadbalancer_failover(self, loadbalancer_id): msg = _('OVN provider does not support loadbalancer failover') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): request_info = {'id': new_loadbalancer.loadbalancer_id} if not isinstance( new_loadbalancer.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_loadbalancer.admin_state_up request = {'type': ovn_const.REQ_TYPE_LB_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) # Pool def pool_create(self, pool): self._check_for_supported_protocols(pool.protocol) self._check_for_supported_algorithms(pool.lb_algorithm) admin_state_up = pool.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': pool.pool_id, 'loadbalancer_id': pool.loadbalancer_id, 'protocol': pool.protocol, 'lb_algorithm': pool.lb_algorithm, 'listener_id': pool.listener_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': request_info} self._ovn_helper.add_request(request) def pool_delete(self, pool): for member in pool.members: self.member_delete(member) request_info = {'id': pool.pool_id, 'protocol': pool.protocol, 'loadbalancer_id': pool.loadbalancer_id} request = {'type': ovn_const.REQ_TYPE_POOL_DELETE, 'info': request_info} self._ovn_helper.add_request(request) def pool_update(self, old_pool, new_pool): if not isinstance(new_pool.protocol, o_datamodels.UnsetType): self._check_for_supported_protocols(new_pool.protocol) if not isinstance(new_pool.lb_algorithm, o_datamodels.UnsetType): self._check_for_supported_algorithms(new_pool.lb_algorithm) request_info = {'id': old_pool.pool_id, 'protocol': old_pool.protocol, 'loadbalancer_id': old_pool.loadbalancer_id} if not isinstance(new_pool.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_pool.admin_state_up request = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) def listener_create(self, listener): self._check_for_supported_protocols(listener.protocol) self._check_for_allowed_cidrs(listener.allowed_cidrs) admin_state_up = listener.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': listener.listener_id, 'protocol': listener.protocol, 'loadbalancer_id': listener.loadbalancer_id, 'protocol_port': listener.protocol_port, 'default_pool_id': listener.default_pool_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': request_info} self._ovn_helper.add_request(request) def listener_delete(self, listener): request_info = {'id': listener.listener_id, 'loadbalancer_id': listener.loadbalancer_id, 'protocol_port': listener.protocol_port, 'protocol': listener.protocol} request = {'type': ovn_const.REQ_TYPE_LISTENER_DELETE, 'info': request_info} 
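# Note: as with the other entry points in this driver, the operation is
# only queued at this point; OvnProviderHelper.add_request() hands the
# request dict to a worker thread which performs the actual OVN
# Northbound DB changes asynchronously, so this call returns quickly.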
self._ovn_helper.add_request(request) def listener_update(self, old_listener, new_listener): self._check_for_allowed_cidrs(new_listener.allowed_cidrs) request_info = {'id': new_listener.listener_id, 'loadbalancer_id': old_listener.loadbalancer_id, 'protocol': old_listener.protocol, 'protocol_port': old_listener.protocol_port} if not isinstance(new_listener.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_listener.admin_state_up if not isinstance(new_listener.default_pool_id, o_datamodels.UnsetType): request_info['default_pool_id'] = new_listener.default_pool_id request = {'type': ovn_const.REQ_TYPE_LISTENER_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) # Member def _check_monitor_options(self, member): if (isinstance(member.monitor_address, o_datamodels.UnsetType) and isinstance(member.monitor_port, o_datamodels.UnsetType)): return False if member.monitor_address or member.monitor_port: return True return False def _check_member_monitor_options(self, member): if self._check_monitor_options(member): msg = _('OVN Load Balancer does not support different member ' 'monitor address or port.') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _ip_version_differs(self, member): _, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(member.pool_id) if not ovn_lb: return False lb_vip = ovn_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] return netaddr.IPNetwork(lb_vip).version != ( netaddr.IPNetwork(member.address).version) def member_create(self, member): # Validate monitoring options if present self._check_member_monitor_options(member) if self._ip_version_differs(member): raise ovn_exc.IPVersionsMixingNotSupportedError() admin_state_up = member.admin_state_up subnet_id = member.subnet_id if (isinstance(subnet_id, o_datamodels.UnsetType) or not subnet_id): subnet_id = self._ovn_helper._get_subnet_from_pool(member.pool_id) if not subnet_id: msg = _('Subnet is required, or Loadbalancer associated with ' 'Pool must have a subnet, for Member creation ' 'with OVN Provider Driver') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': subnet_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': request_info} self._ovn_helper.add_request(request) # NOTE(mjozefcz): If LB has FIP on VIP # and member has FIP we need to centralize # traffic for member. request_info = {'id': member.member_id, 'address': member.address, 'pool_id': member.pool_id, 'subnet_id': subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} request = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': request_info} self._ovn_helper.add_request(request) def member_delete(self, member): request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': member.subnet_id} request = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': request_info} self._ovn_helper.add_request(request) # NOTE(mjozefcz): If LB has FIP on VIP # and member had FIP we can decentralize # the traffic now. 
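# The REQ_TYPE_HANDLE_MEMBER_DVR request queued below lets the helper
# re-evaluate that FIP combination and undo the centralization that
# member_create() may have set up for this member.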
request_info = {'id': member.member_id, 'address': member.address, 'pool_id': member.pool_id, 'subnet_id': member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} request = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': request_info} self._ovn_helper.add_request(request) def member_update(self, old_member, new_member): # Validate monitoring options if present self._check_member_monitor_options(new_member) if new_member.address and self._ip_version_differs(new_member): raise ovn_exc.IPVersionsMixingNotSupportedError() request_info = {'id': new_member.member_id, 'address': old_member.address, 'protocol_port': old_member.protocol_port, 'pool_id': old_member.pool_id, 'subnet_id': old_member.subnet_id, 'old_admin_state_up': old_member.admin_state_up} if not isinstance(new_member.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_member.admin_state_up request = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) def member_batch_update(self, pool_id, members): request_list = [] skipped_members = [] pool_key, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(pool_id) external_ids = copy.deepcopy(ovn_lb.external_ids) pool = external_ids[pool_key] existing_members = pool.split(',') if pool else [] members_to_delete = copy.copy(existing_members) for member in members: if (self._check_monitor_options(member) or member.address and self._ip_version_differs(member)): skipped_members.append(member.member_id) continue # NOTE(mjozefcz): We need to have subnet_id information. if (isinstance(member.subnet_id, o_datamodels.UnsetType) or not member.subnet_id): msg = _('Subnet is required for Member creation ' 'with OVN Provider Driver') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) admin_state_up = member.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True member_info = self._ovn_helper._get_member_info(member) if member_info not in existing_members: req_type = ovn_const.REQ_TYPE_MEMBER_CREATE else: # If member exists in pool, then Update req_type = ovn_const.REQ_TYPE_MEMBER_UPDATE # Remove all updating members so only deleted ones are left members_to_delete.remove(member_info) request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': member.subnet_id, 'admin_state_up': admin_state_up} request = {'type': req_type, 'info': request_info} request_list.append(request) for member in members_to_delete: member_info = member.split('_') request_info = {'id': member_info[1], 'address': member_info[2].split(':')[0], 'protocol_port': member_info[2].split(':')[1], 'pool_id': pool_id} if len(member_info) == 4: request_info['subnet_id'] = member_info[3] request = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': request_info} request_list.append(request) for request in request_list: self._ovn_helper.add_request(request) if skipped_members: msg = (_('OVN provider does not support monitor options, ' 'so following members skipped: %s') % skipped_members) raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def create_vip_port(self, lb_id, project_id, vip_dict): try: port = self._ovn_helper.create_vip_port( project_id, lb_id, vip_dict)['port'] vip_dict[constants.VIP_PORT_ID] = port['id'] vip_dict[constants.VIP_ADDRESS] = ( port['fixed_ips'][0]['ip_address']) except Exception as e: kwargs = {} if hasattr(e, 
'message'): kwargs = {'user_fault_string': e.message, 'operator_fault_string': e.message} raise driver_exceptions.DriverError( **kwargs) return vip_dict def _validate_hm_support(self, hm, action='create'): if not self._is_health_check_supported(): msg = _('OVN Load Balancer supports Health Check provider ' 'from version 2.12. Upgrade OVN in order to use it.') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) # type is only required for create if action == 'create': if isinstance(hm.type, o_datamodels.UnsetType): msg = _('OVN provider health monitor type not specified.') # seems this should be other than "unsupported"? raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) if hm.type not in ovn_const.SUPPORTED_HEALTH_MONITOR_TYPES: msg = (_('OVN provider does not support %s ' 'health monitor type. Supported types: %s') % (hm.type, ', '.join(ovn_const.SUPPORTED_HEALTH_MONITOR_TYPES))) raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def health_monitor_create(self, healthmonitor): self._validate_hm_support(healthmonitor) admin_state_up = healthmonitor.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': healthmonitor.healthmonitor_id, 'pool_id': healthmonitor.pool_id, 'type': healthmonitor.type, 'interval': healthmonitor.delay, 'timeout': healthmonitor.timeout, 'failure_count': healthmonitor.max_retries_down, 'success_count': healthmonitor.max_retries, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': request_info} self._ovn_helper.add_request(request) def health_monitor_update(self, old_healthmonitor, new_healthmonitor): self._validate_hm_support(new_healthmonitor, action='update') admin_state_up = new_healthmonitor.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': new_healthmonitor.healthmonitor_id, 'pool_id': old_healthmonitor.pool_id, 'interval': new_healthmonitor.delay, 'timeout': new_healthmonitor.timeout, 'failure_count': new_healthmonitor.max_retries_down, 'success_count': new_healthmonitor.max_retries, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_HM_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) def health_monitor_delete(self, healthmonitor): request_info = {'id': healthmonitor.healthmonitor_id} request = {'type': ovn_const.REQ_TYPE_HM_DELETE, 'info': request_info} self._ovn_helper.add_request(request) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/event.py0000664000175000017500000000600600000000000023531 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
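# The RowEvent subclasses in this module are plain ovsdbapp event
# handlers: the provider agent registers them on its NB/SB IDL
# connections with notify_handler.watch_events(), and each run() method
# simply forwards the affected row to the matching OvnProviderHelper
# handler method.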
from oslo_log import log as logging from ovsdbapp.backend.ovs_idl import event as row_event # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. from ovn_octavia_provider.common import constants as ovn_const LOG = logging.getLogger(__name__) class LogicalRouterPortEvent(row_event.RowEvent): def __init__(self, driver): table = 'Logical_Router_Port' events = (self.ROW_CREATE, self.ROW_DELETE) super().__init__(events, table, None) self.event_name = 'LogicalRouterPortEvent' self.driver = driver def run(self, event, row, old): LOG.debug('LogicalRouterPortEvent logged, ' '%(event)s, %(row)s', {'event': event, 'row': row}) if row.gateway_chassis: return if event == self.ROW_CREATE: self.driver.lb_create_lrp_assoc_handler(row) elif event == self.ROW_DELETE: self.driver.lb_delete_lrp_assoc_handler(row) class LogicalSwitchPortUpdateEvent(row_event.RowEvent): def __init__(self, driver): table = 'Logical_Switch_Port' events = (self.ROW_UPDATE,) super().__init__(events, table, None) self.event_name = 'LogicalSwitchPortUpdateEvent' self.driver = driver def run(self, event, row, old): LOG.debug('LogicalSwitchPortUpdateEvent logged, ' '%(event)s, %(row)s', {'event': event, 'row': row}) # Get the neutron:port_name from external_ids and check if # it's a vip port or not. port_name = row.external_ids.get( ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '') if port_name.startswith(ovn_const.LB_VIP_PORT_PREFIX): # Handle port update only for vip ports created by # this driver. self.driver.vip_port_update_handler(row) class ServiceMonitorUpdateEvent(row_event.RowEvent): def __init__(self, driver): table = 'Service_Monitor' events = (self.ROW_UPDATE,) super().__init__(events, table, None) self.event_name = 'ServiceMonitorUpdateEvent' self.driver = driver def run(self, event, row, old): LOG.debug('ServiceMonitorUpdateEvent logged, ' '%(event)s, %(row)s', {'event': event, 'row': row}) self.driver.hm_update_event_handler(row) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4525883 ovn-octavia-provider-2.0.0/ovn_octavia_provider/hacking/0000775000175000017500000000000000000000000023440 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/hacking/__init__.py0000664000175000017500000000000000000000000025537 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/hacking/checks.py0000664000175000017500000001537200000000000025262 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for OVN Octavia provider specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. 
Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to ovn_octavia_provider/tests/unit/hacking/test_checks.py """ import re from hacking import core unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b") unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b") filter_match = re.compile(r".*filter\(lambda ") tests_imports_dot = re.compile(r"\bimport[\s]+ovn_octavia_provider.tests\b") tests_imports_from1 = re.compile(r"\bfrom[\s]+ovn_octavia_provider.tests\b") tests_imports_from2 = re.compile( r"\bfrom[\s]+ovn_octavia_provider[\s]+import[\s]+tests\b") no_line_continuation_backslash_re = re.compile(r'.*(\\)\n') import_mock = re.compile(r"\bimport[\s]+mock\b") import_from_mock = re.compile(r"\bfrom[\s]+mock[\s]+import\b") @core.flake8ext def check_assert_called_once_with(logical_line, filename): """Try to detect unintended calls of nonexistent mock methods like: assert_called_once assertCalledOnceWith assert_has_called called_once_with N322 """ if 'ovn_octavia_provider/tests/' in filename: if '.assert_called_once_with(' in logical_line: return uncased_line = logical_line.lower().replace('_', '') check_calls = ['.assertcalledonce', '.calledoncewith'] if any(x for x in check_calls if x in uncased_line): msg = ("N322: Possible use of no-op mock method. " "please use assert_called_once_with.") yield (0, msg) if '.asserthascalled' in uncased_line: msg = ("N322: Possible use of no-op mock method. " "please use assert_has_calls.") yield (0, msg) @core.flake8ext def check_asserttruefalse(logical_line, filename): """N328 - Don't use assertEqual(True/False, observed).""" if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) @core.flake8ext def check_assertempty(logical_line, filename): """Enforce using assertEqual parameter ordering in case of empty objects. N330 """ if 'ovn_octavia_provider/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). 
*empty* contains " "{}, [], (), set(), '', \"\"") empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" reg = fr"assertEqual\(([^,]*,\s*)+?{empties}\)\s*$" if re.search(reg, logical_line): yield (0, msg) @core.flake8ext def check_assertisinstance(logical_line, filename): """N331 - Enforce using assertIsInstance.""" if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", logical_line): msg = ("N331: Use assertIsInstance(observed, type) instead " "of assertTrue(isinstance(observed, type))") yield (0, msg) @core.flake8ext def check_assertequal_for_httpcode(logical_line, filename): """N332 - Enforce correct ordering for httpcode in assertEqual.""" msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " "instead of assertEqual(observed_http_code, expected_http_code)") if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", logical_line): yield (0, msg) @core.flake8ext def check_no_imports_from_tests(logical_line, filename): """N343 - Production code must not import from ovn_octavia_provider.tests.* """ msg = ("N343 Production code must not import from " "ovn_octavia_provider.tests.*") if 'ovn_octavia_provider/tests/' in filename: return for regex in tests_imports_dot, tests_imports_from1, tests_imports_from2: if re.match(regex, logical_line): yield (0, msg) @core.flake8ext def check_python3_no_filter(logical_line): """N344 - Use list comprehension instead of filter(lambda).""" msg = ("N344: Use list comprehension instead of " "filter(lambda obj: test(obj), data) on python3.") if filter_match.match(logical_line): yield (0, msg) @core.flake8ext def check_no_import_mock(logical_line, filename, noqa): """N347 - Test code must not import mock library.""" msg = ("N347: Test code must not import mock library") if noqa: return if 'ovn_octavia_provider/tests/' not in filename: return for regex in import_mock, import_from_mock: if re.match(regex, logical_line): yield (0, msg) @core.flake8ext def check_assertcountequal(logical_line, filename): """N348 - Enforce using assertCountEqual.""" msg = ("N348: Use assertCountEqual(expected, observed) " "instead of assertItemsEqual(observed, expected)") if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertItemsEqual\([^,]*,\s*(,[^,]*)?", logical_line): yield (0, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/helper.py0000664000175000017500000032220300000000000023667 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
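# OvnProviderHelper below funnels every driver request through a single
# queue.Queue consumed by a daemon worker thread; _init_lb_actions() maps
# each REQ_TYPE_* constant to the method that applies the corresponding
# change to the OVN Northbound database.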
import copy import queue import re import threading import netaddr from neutron_lib import constants as n_const from neutronclient.common import exceptions as n_exc from octavia_lib.api.drivers import data_models as o_datamodels from octavia_lib.api.drivers import driver_lib as o_driver_lib from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.common import constants from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import idlutils import tenacity from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config as ovn_conf # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.common import utils from ovn_octavia_provider.i18n import _ from ovn_octavia_provider.ovsdb import impl_idl_ovn CONF = cfg.CONF # Gets Octavia Conf as it runs under o-api domain ovn_conf.register_opts() LOG = logging.getLogger(__name__) class OvnProviderHelper(): def __init__(self): self.requests = queue.Queue() self.helper_thread = threading.Thread(target=self.request_handler) self.helper_thread.daemon = True self._octavia_driver_lib = o_driver_lib.DriverLibrary() self._check_and_set_ssl_files() self._init_lb_actions() # NOTE(mjozefcz): This API is only for handling octavia API requests. self.ovn_nbdb = impl_idl_ovn.OvnNbIdlForLb() self.ovn_nbdb_api = self.ovn_nbdb.start() self.helper_thread.start() def _init_lb_actions(self): self._lb_request_func_maps = { ovn_const.REQ_TYPE_LB_CREATE: self.lb_create, ovn_const.REQ_TYPE_LB_DELETE: self.lb_delete, ovn_const.REQ_TYPE_LB_UPDATE: self.lb_update, ovn_const.REQ_TYPE_LISTENER_CREATE: self.listener_create, ovn_const.REQ_TYPE_LISTENER_DELETE: self.listener_delete, ovn_const.REQ_TYPE_LISTENER_UPDATE: self.listener_update, ovn_const.REQ_TYPE_POOL_CREATE: self.pool_create, ovn_const.REQ_TYPE_POOL_DELETE: self.pool_delete, ovn_const.REQ_TYPE_POOL_UPDATE: self.pool_update, ovn_const.REQ_TYPE_MEMBER_CREATE: self.member_create, ovn_const.REQ_TYPE_MEMBER_DELETE: self.member_delete, ovn_const.REQ_TYPE_MEMBER_UPDATE: self.member_update, ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc, ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc, ovn_const.REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip, ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr, ovn_const.REQ_TYPE_HM_CREATE: self.hm_create, ovn_const.REQ_TYPE_HM_UPDATE: self.hm_update, ovn_const.REQ_TYPE_HM_DELETE: self.hm_delete, ovn_const.REQ_TYPE_HM_UPDATE_EVENT: self.hm_update_event, } @staticmethod def _is_lb_empty(external_ids): """Check if there is no pool or listener defined.""" return not any(k.startswith('listener') or k.startswith('pool') for k in external_ids) @staticmethod def _delete_disabled_from_status(status): # pylint: disable=multiple-statements d_regex = f':{ovn_const.DISABLED_RESOURCE_SUFFIX}$' return { k: [{c: re.sub(d_regex, '', d) for c, d in i.items()} for i in v] for k, v in status.items()} def _check_and_set_ssl_files(self): # TODO(reedip): Make ovsdb_monitor's _check_and_set_ssl_files() public # This is a copy of ovsdb_monitor._check_and_set_ssl_files priv_key_file = ovn_conf.get_ovn_nb_private_key() cert_file = ovn_conf.get_ovn_nb_certificate() ca_cert_file = ovn_conf.get_ovn_nb_ca_cert() if priv_key_file: Stream.ssl_set_private_key_file(priv_key_file) if cert_file: 
            Stream.ssl_set_certificate_file(cert_file)
        if ca_cert_file:
            Stream.ssl_set_ca_cert_file(ca_cert_file)

    def shutdown(self):
        self.requests.put({'type': ovn_const.REQ_TYPE_EXIT})
        self.helper_thread.join()
        self.ovn_nbdb.stop()
        del self.ovn_nbdb_api

    @staticmethod
    def _map_val(row, col, key):
        # If the row doesn't exist, RowNotFound is raised by _map_val
        # and is expected to be caught by the caller.
        try:
            return getattr(row, col)[key]
        except KeyError as e:
            raise idlutils.RowNotFound(table=row._table.name,
                                       col=col, match=key) from e

    def _ensure_hm_ovn_port(self, network_id):
        # We need to have a metadata or dhcp port, OVN should have created
        # one when the network was created
        neutron_client = clients.get_neutron_client()
        meta_dhcp_port = neutron_client.list_ports(
            network_id=network_id,
            device_owner=n_const.DEVICE_OWNER_DISTRIBUTED)
        if meta_dhcp_port['ports']:
            return meta_dhcp_port['ports'][0]

    def _get_nw_router_info_on_interface_event(self, lrp):
        """Get the Router and Network information on an interface event

        This function is called when a new interface between a router and
        a network is added or deleted.
        Input: Logical Router Port row which is coming from
               LogicalRouterPortEvent.
        Output: A row from the router table and the network table matching
                the router and network for which the event was generated.
        Exception: RowNotFound exception can be generated.
        """
        router = self.ovn_nbdb_api.lookup(
            'Logical_Router',
            utils.ovn_name(self._map_val(
                lrp, 'external_ids',
                ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY)))
        network = self.ovn_nbdb_api.lookup(
            'Logical_Switch',
            self._map_val(lrp, 'external_ids',
                          ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY))
        return router, network

    def _clean_lb_if_empty(self, ovn_lb, lb_id, external_ids):
        commands = []
        lb_to_delete = False
        if OvnProviderHelper._is_lb_empty(external_ids):
            # Check whether this is the only OVN LB defined for this
            # Octavia LB. If so, leave it with an undefined protocol.
            # If another LB exists for a different protocol, remove
            # this one.
            try:
                defined_ovn_lbs = self._find_ovn_lbs(lb_id)
            except idlutils.RowNotFound:
                defined_ovn_lbs = []
            if len(defined_ovn_lbs) == 1:
                commands.append(
                    self.ovn_nbdb_api.db_set(
                        'Load_Balancer', ovn_lb.uuid, ('protocol', [])))
            elif len(defined_ovn_lbs) > 1:
                # Delete the lb.
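                # (A Load_Balancer row for another L4 protocol still
                # exists for this Octavia LB, so this now-empty row can
                # be removed outright instead of being kept around with
                # an undefined protocol.)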
                commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid))
                lb_to_delete = True
        return (commands, lb_to_delete)

    def lb_delete_lrp_assoc_handler(self, row):
        try:
            router, network = self._get_nw_router_info_on_interface_event(row)
        except idlutils.RowNotFound:
            LOG.debug("Router or network information not found")
            return
        request_info = {'network': network,
                        'router': router}
        self.add_request({'type': ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC,
                          'info': request_info})

    def lb_delete_lrp_assoc(self, info):
        # TODO(reedip): When OVS>=2.12, LB can be deleted without removing
        # Network and Router references as pushed in the patch
        # https://github.com/openvswitch/ovs/commit
        # /612f80fa8ebf88dad2e204364c6c02b451dca36c
        commands = []
        network = info['network']
        router = info['router']

        # Find all loadbalancers which have a reference with the network
        nw_lb = self._find_lb_in_ls(network=network)
        # Find all loadbalancers which have a reference with the router
        r_lb = set(router.load_balancer) - nw_lb
        # Delete all LB on N/W from Router
        for nlb in nw_lb:
            commands.extend(self._update_lb_to_lr_association(nlb, router,
                                                              delete=True))
        # Delete all LB on Router from N/W
        for rlb in r_lb:
            commands.append(self.ovn_nbdb_api.ls_lb_del(
                network.uuid, rlb.uuid))
        if commands:
            self._execute_commands(commands)

    def lb_create_lrp_assoc_handler(self, row):
        try:
            router, network = self._get_nw_router_info_on_interface_event(row)
        except idlutils.RowNotFound:
            LOG.debug("Router or network information not found")
            return
        request_info = {'network': network,
                        'router': router}
        self.add_request({'type': ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC,
                          'info': request_info})

    def lb_create_lrp_assoc(self, info):
        commands = []
        router_lb = set(info['router'].load_balancer)
        network_lb = set(info['network'].load_balancer)
        # Add only those lb to routers which are unique to the network
        for lb in (network_lb - router_lb):
            commands.extend(self._update_lb_to_lr_association(
                lb, info['router']))
        # Add those lb to the network which are unique to the router
        for lb in (router_lb - network_lb):
            commands.append(self.ovn_nbdb_api.ls_lb_add(
                info['network'].uuid, lb.uuid, may_exist=True))
        if commands:
            self._execute_commands(commands)

    def vip_port_update_handler(self, vip_lp):
        """Handler for VirtualIP port updates.

        If a floating ip is associated with a vip port, then networking-ovn
        sets the fip in the external_ids column of the logical port as:
        Logical_Switch_Port.external_ids:port_fip = <FIP>.
        Then, in the Load_Balancer table for the vip, networking-ovn
        creates another vip entry for the FIP.
        If a floating ip is disassociated from the vip, then it deletes
        the vip entry for the FIP.
        """
        port_name = vip_lp.external_ids.get(
            ovn_const.OVN_PORT_NAME_EXT_ID_KEY)
        lb_id = port_name[len(ovn_const.LB_VIP_PORT_PREFIX):]
        try:
            ovn_lbs = self._find_ovn_lbs_with_retry(lb_id)
        except idlutils.RowNotFound:
            LOG.debug("Loadbalancer %s not found!", lb_id)
            return

        # Loop over all defined LBs with given ID, because it is possible
        # that there is more than one (for more than 1 L4 protocol).
for lb in ovn_lbs: fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY) lb_vip_fip = lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) request_info = {'ovn_lb': lb, 'vip_fip': fip} if fip and fip != lb_vip_fip: request_info['action'] = ovn_const.REQ_INFO_ACTION_ASSOCIATE elif fip is None and fip != lb_vip_fip: request_info['action'] = ovn_const.REQ_INFO_ACTION_DISASSOCIATE else: continue self.add_request({'type': ovn_const.REQ_TYPE_HANDLE_VIP_FIP, 'info': request_info}) def _find_lb_in_ls(self, network): """Find LB associated to a Network using Network information This function retrieves those loadbalancers whose ls_ref column in the OVN northbound database's load_balancer table has the network's name. Though different networks can be associated with a loadbalancer, but ls_ref of a loadbalancer points to the network where it was actually created, and this function tries to retrieve all those loadbalancers created on this network. Input : row of type Logical_Switch Output: set of rows of type Load_Balancer or empty set """ return {lb for lb in network.load_balancer if network.name in lb.external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY, [])} def _find_lb_in_table(self, lb, table): return self.ovn_nbdb_api.find_lb_in_table( lb, table).execute(check_error=True) def request_handler(self): while True: request = self.requests.get() request_type = request['type'] if request_type == ovn_const.REQ_TYPE_EXIT: break request_handler = self._lb_request_func_maps.get(request_type) try: if request_handler: LOG.debug("Handling request %(req)s with info %(info)s", {'req': request_type, 'info': request['info']}) status = request_handler(request['info']) if status: self._update_status_to_octavia(status) self.requests.task_done() except driver_exceptions.UpdateStatusError as e: LOG.error("Error while updating the load balancer status: %s", e.fault_string) # TODO(haleyb): The resource(s) we were updating status for # should be cleaned-up except Exception: # If any unexpected exception happens we don't want the # notify_loop to exit. LOG.exception('Unexpected exception in request_handler') def add_request(self, req): self.requests.put(req) @tenacity.retry( retry=tenacity.retry_if_exception_type( driver_exceptions.UpdateStatusError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def _update_status_to_octavia(self, status): status = OvnProviderHelper._delete_disabled_from_status(status) LOG.debug('Updating status to octavia: %s', status) self._octavia_driver_lib.update_loadbalancer_status(status) @tenacity.retry( retry=tenacity.retry_if_exception_type(idlutils.RowNotFound), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def _find_ovn_lbs_with_retry(self, lb_id, protocol=None): return self._find_ovn_lbs(lb_id, protocol=protocol) def _find_ovn_lbs(self, lb_id, protocol=None): """Find the Loadbalancers in OVN with the given lb_id as its name This function searches for the LoadBalancers whose Name has the pattern passed in lb_id. @param lb_id: LoadBalancer ID provided by Octavia in its API request. Note that OVN saves the above ID in the 'name' column. @type lb_id: str @param protocol: Loadbalancer protocol. @type protocol: str or None if not defined. :returns: LoadBalancer row if protocol specified or list of rows matching the lb_id. :raises: RowNotFound can be generated if the LoadBalancer is not found. 
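
        Example (hypothetical ID, sketching both call modes)::

            self._find_ovn_lbs('8a23...')          # all protocol variants
            self._find_ovn_lbs('8a23...', 'tcp')   # single row for TCP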
""" lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', ('name', '=', lb_id)).execute() if not protocol: if lbs: return lbs raise idlutils.RowNotFound(table='Load_Balancer', col='name', match=lb_id) # If there is only one LB without protocol defined, so # it is 'clean' LB record without any listener. if len(lbs) == 1 and not lbs[0].protocol: return lbs[0] # Search for other lbs. for lb in lbs: if lb.protocol[0].upper() == protocol.upper(): return lb raise idlutils.RowNotFound(table='Load_Balancer', col='name', match=lb_id) def _get_or_create_ovn_lb( self, lb_id, protocol, admin_state_up, lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT): """Find or create ovn lb with given protocol Find the loadbalancer configured with given protocol or create required if not found """ # TODO(mjozefcz): For now we support only one LB algorithm. # As we may extend that in the future we would need to # look here also for lb_algorithm, along with protocol. # Make sure that its lowercase - OVN NBDB stores lowercases # for this field. protocol = protocol.lower() ovn_lbs = self._find_ovn_lbs(lb_id) lbs_with_required_protocol = [ ovn_lb for ovn_lb in ovn_lbs if protocol in ovn_lb.protocol] lbs_with_no_protocol = [ovn_lb for ovn_lb in ovn_lbs if not ovn_lb.protocol] if lbs_with_required_protocol: # We found existing LB with required # protocol, just return it. return lbs_with_required_protocol[0] elif lbs_with_no_protocol: ovn_lb = lbs_with_no_protocol[0] # Set required protocol here. self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('protocol', protocol)).execute(check_error=True) else: # NOTE(mjozefcz): Looks like loadbalancer with given protocol # doesn't exist. Try to add it with required protocol # by copy the existing one data. lb_info = { 'id': lb_id, 'protocol': protocol, constants.LB_ALGORITHM: lb_algorithm, 'vip_address': ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY), 'vip_port_id': ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY), ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY), ovn_const.LB_EXT_IDS_LS_REFS_KEY: ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY), 'admin_state_up': admin_state_up} # NOTE(mjozefcz): Handle vip_fip info if exists. vip_fip = ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY) if vip_fip: lb_info.update({ovn_const.LB_EXT_IDS_VIP_FIP_KEY: vip_fip}) self.lb_create(lb_info, protocol=protocol) # Looks like we've just added new LB # or updated exising, empty one. 
return self._find_ovn_lbs(lb_id, protocol=protocol) def _find_ovn_lb_with_pool_key(self, pool_key): lbs = self.ovn_nbdb_api.db_list_rows( 'Load_Balancer').execute(check_error=True) for lb in lbs: # Skip load balancers used by port forwarding plugin if lb.external_ids.get(ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) == ( ovn_const.PORT_FORWARDING_PLUGIN): continue if pool_key in lb.external_ids: return lb def _find_ovn_lb_by_pool_id(self, pool_id): pool_key = self._get_pool_key(pool_id) ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) if not ovn_lb: pool_key = self._get_pool_key(pool_id, is_enabled=False) ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) return pool_key, ovn_lb def _get_subnet_from_pool(self, pool_id): pool = self._octavia_driver_lib.get_pool(pool_id) if not pool: return lb = self._octavia_driver_lib.get_loadbalancer(pool.loadbalancer_id) if lb and lb.vip_subnet_id: return lb.vip_subnet_id def _execute_commands(self, commands): with self.ovn_nbdb_api.transaction(check_error=True) as txn: for command in commands: txn.add(command) def _update_lb_to_ls_association(self, ovn_lb, network_id=None, subnet_id=None, associate=True): """Update LB association with Logical Switch This function deals with updating the References of Logical Switch in LB and addition of LB to LS. """ commands = [] if not network_id and not subnet_id: return commands if network_id: ls_name = utils.ovn_name(network_id) else: neutron_client = clients.get_neutron_client() try: subnet = neutron_client.show_subnet(subnet_id) ls_name = utils.ovn_name(subnet['subnet']['network_id']) except n_exc.NotFound: LOG.warning('Subnet %s not found while trying to ' 'fetch its data.', subnet_id) ls_name = None ovn_ls = None if ls_name: try: ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) except idlutils.RowNotFound: LOG.warning("LogicalSwitch %s could not be found.", ls_name) if associate: LOG.warning('Cannot associate LB %(lb)s to ' 'LS %(ls)s because LS row ' 'not found in OVN NBDB. Exiting.', {'ls': ls_name, 'lb': ovn_lb.name}) return commands ovn_ls = None ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) if ls_refs: try: ls_refs = jsonutils.loads(ls_refs) except ValueError: ls_refs = {} else: ls_refs = {} if associate and ls_name: if ls_name in ls_refs: ref_ct = ls_refs[ls_name] ls_refs[ls_name] = ref_ct + 1 else: ls_refs[ls_name] = 1 if ovn_ls: commands.append(self.ovn_nbdb_api.ls_lb_add( ovn_ls.uuid, ovn_lb.uuid, may_exist=True)) else: if ls_name not in ls_refs: # Nothing to be done. 
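                # (ls_refs keeps a per-switch reference count: associate
                # increments it, disassociate decrements it, and the LB is
                # only detached from the Logical_Switch when the count for
                # that switch drops from 1 to 0; see ref_ct below.)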
return commands ref_ct = ls_refs[ls_name] if ref_ct == 1: del ls_refs[ls_name] if ovn_ls: commands.append(self.ovn_nbdb_api.ls_lb_del( ovn_ls.uuid, ovn_lb.uuid, if_exists=True)) else: ls_refs[ls_name] = ref_ct - 1 ls_refs = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)} commands.append(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', ls_refs))) return commands def _del_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_ref): commands = [] if lr_ref: try: lr_ref = [r for r in [lr.strip() for lr in lr_ref.split(',')] if r != ovn_lr.name] except ValueError: LOG.warning('The loadbalancer %(lb)s is not associated with ' 'the router %(router)s', {'lb': ovn_lb.name, 'router': ovn_lr.name}) if lr_ref: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', {ovn_const.LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)}))) else: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (ovn_const.LB_EXT_IDS_LR_REF_KEY))) commands.append( self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid, if_exists=True)) for net in self._find_ls_for_lr(ovn_lr): commands.append(self.ovn_nbdb_api.ls_lb_del( net, ovn_lb.uuid, if_exists=True)) return commands def _add_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_rf): commands = [] commands.append( self.ovn_nbdb_api.lr_lb_add(ovn_lr.uuid, ovn_lb.uuid, may_exist=True)) for net in self._find_ls_for_lr(ovn_lr): commands.append(self.ovn_nbdb_api.ls_lb_add( net, ovn_lb.uuid, may_exist=True)) if ovn_lr.name not in str(lr_rf): # Multiple routers in lr_rf are separated with ',' if lr_rf: lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: f"{lr_rf},{ovn_lr.name}"} else: lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', lr_rf))) return commands def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False): lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) if delete: return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) else: return self._add_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) def _find_ls_for_lr(self, router): neutron_client = clients.get_neutron_client() ls = [] for port in router.ports: if port.gateway_chassis: continue sids = port.external_ids.get( ovn_const.OVN_SUBNET_EXT_IDS_KEY, '').split(' ') for sid in sids: try: subnet = neutron_client.show_subnet(sid) ls.append(utils.ovn_name(subnet['subnet']['network_id'])) except n_exc.NotFound: LOG.exception('Subnet %s not found while trying to ' 'fetch its data.', sid) return ls def _find_lr_of_ls(self, ovn_ls, subnet_gateway_ip=None): lsp_router_port = None for port in ovn_ls.ports or []: if (port.type == 'router' and port.external_ids.get( ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) == n_const.DEVICE_OWNER_ROUTER_INTF): if subnet_gateway_ip: port_cidr = netaddr.IPNetwork( port.external_ids[ ovn_const.OVN_PORT_CIDR_EXT_ID_KEY]).ip if netaddr.IPAddress(subnet_gateway_ip) != port_cidr: continue lsp_router_port = port break else: return lrp_name = lsp_router_port.options.get('router-port') if not lrp_name: return lrs = self.ovn_nbdb_api.get_lrs().execute(check_error=True) for lr in lrs: for lrp in lr.ports: if lrp.name == lrp_name: return lr # Handles networks with only gateway port in the router if (utils.ovn_lrouter_port_name( lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)) == lrp_name): return lr def _get_listener_key(self, listener_id, is_enabled=True): listener_key = ovn_const.LB_EXT_IDS_LISTENER_PREFIX + 
str(listener_id) if not is_enabled: listener_key += ':' + ovn_const.DISABLED_RESOURCE_SUFFIX return listener_key def _get_pool_key(self, pool_id, is_enabled=True): pool_key = ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id) if not is_enabled: pool_key += ':' + ovn_const.DISABLED_RESOURCE_SUFFIX return pool_key def _extract_member_info(self, member): mem_info = [] if member: for mem in member.split(','): mem_split = mem.split('_') mem_ip_port = mem_split[2] mem_ip, mem_port = mem_ip_port.rsplit(':', 1) mem_subnet = mem_split[3] mem_info.append((mem_ip, mem_port, mem_subnet)) return mem_info def _get_member_info(self, member): member_info = '' if isinstance(member, dict): subnet_id = member.get(constants.SUBNET_ID, '') member_info = ( f'{ovn_const.LB_EXT_IDS_MEMBER_PREFIX}{member[constants.ID]}_' f'{member[constants.ADDRESS]}:' f'{member[constants.PROTOCOL_PORT]}_{subnet_id}') elif isinstance(member, o_datamodels.Member): subnet_id = member.subnet_id or '' member_info = ( f'{ovn_const.LB_EXT_IDS_MEMBER_PREFIX}{member.member_id}_' f'{member.address}:{member.protocol_port}_{subnet_id}') return member_info def _make_listener_key_value(self, listener_port, pool_id): return str(listener_port) + ':' + pool_id def _extract_listener_key_value(self, listener_value): v = listener_value.split(':') if len(v) == 2: return (v[0], v[1]) else: return (None, None) def _is_listener_disabled(self, listener_key): v = listener_key.split(':') if len(v) == 2 and v[1] == ovn_const.DISABLED_RESOURCE_SUFFIX: return True return False def _get_pool_listeners(self, ovn_lb, pool_key): pool_listeners = [] for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k: continue vip_port, p_key = self._extract_listener_key_value(v) if pool_key == p_key: pool_listeners.append( k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):]) return pool_listeners def _get_pool_listener_port(self, ovn_lb, pool_key): for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k: continue vip_port, p_key = self._extract_listener_key_value(v) if pool_key == p_key: return vip_port return None def _frame_vip_ips(self, lb_external_ids): vip_ips = {} # If load balancer is disabled, return if lb_external_ids.get('enabled') == 'False': return vip_ips lb_vip = lb_external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] vip_fip = lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) for k, v in lb_external_ids.items(): if (ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k or self._is_listener_disabled(k)): continue vip_port, pool_id = self._extract_listener_key_value(v) if not vip_port or not pool_id: continue if pool_id not in lb_external_ids or not lb_external_ids[pool_id]: continue ips = [] for member_ip, member_port, subnet in self._extract_member_info( lb_external_ids[pool_id]): if netaddr.IPNetwork(member_ip).version == 6: ips.append(f'[{member_ip}]:{member_port}') else: ips.append(f'{member_ip}:{member_port}') if netaddr.IPNetwork(lb_vip).version == 6: lb_vip = f'[{lb_vip}]' vip_ips[lb_vip + ':' + vip_port] = ','.join(ips) if vip_fip: if netaddr.IPNetwork(vip_fip).version == 6: vip_fip = f'[{vip_fip}]' vip_ips[vip_fip + ':' + vip_port] = ','.join(ips) return vip_ips def _refresh_lb_vips(self, ovn_lb_uuid, lb_external_ids): vip_ips = self._frame_vip_ips(lb_external_ids) return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb_uuid, 'vips'), self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb_uuid, ('vips', vip_ips))] def _is_listener_in_lb(self, lb): for key in list(lb.external_ids): if 
key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
                return True
        return False

    def _are_selection_fields_supported(self):
        return self.ovn_nbdb_api.is_col_present(
            'Load_Balancer', 'selection_fields')

    @staticmethod
    def _get_selection_keys(lb_algorithm):
        # pylint: disable=multiple-statements
        return ovn_const.LB_SELECTION_FIELDS_MAP[lb_algorithm]

    def check_lb_protocol(self, lb_id, listener_protocol):
        ovn_lb = self._find_ovn_lbs(lb_id, protocol=listener_protocol)
        if not ovn_lb:
            return False
        elif not self._is_listener_in_lb(ovn_lb):
            return True
        else:
            return str(listener_protocol).lower() in ovn_lb.protocol

    def lb_create(self, loadbalancer, protocol=None):
        port = None
        subnet = {}
        neutron_client = clients.get_neutron_client()
        if loadbalancer.get(constants.VIP_PORT_ID):
            # In case we don't have vip_network_id
            port = neutron_client.show_port(
                loadbalancer[constants.VIP_PORT_ID])['port']
            for ip in port['fixed_ips']:
                if ip['ip_address'] == loadbalancer[constants.VIP_ADDRESS]:
                    subnet = neutron_client.show_subnet(
                        ip['subnet_id'])['subnet']
                    break
        elif (loadbalancer.get(constants.VIP_NETWORK_ID) and
              loadbalancer.get(constants.VIP_ADDRESS)):
            ports = neutron_client.list_ports(
                network_id=loadbalancer[constants.VIP_NETWORK_ID])
            for p in ports['ports']:
                for ip in p['fixed_ips']:
                    if ip['ip_address'] == loadbalancer[
                            constants.VIP_ADDRESS]:
                        port = p
                        subnet = neutron_client.show_subnet(
                            ip['subnet_id'])['subnet']
                        break

        # If protocol is set, make sure it is lowercase.
        protocol = protocol.lower() if protocol else []
        # In case port is not found for the vip_address we will see an
        # exception when port['id'] is accessed.
        external_ids = {
            ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer[constants.VIP_ADDRESS],
            ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY:
                loadbalancer.get(constants.VIP_PORT_ID) or port['id'],
            'enabled': str(loadbalancer[constants.ADMIN_STATE_UP])}
        # In case vip_fip was passed - use it.
        vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
        if vip_fip:
            external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
        # In case lr_ref was passed - use it.
        lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
        if lr_ref:
            external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref
        # In case we have the LB algorithm set.
        lb_algorithm = loadbalancer.get(constants.LB_ALGORITHM)
        kwargs = {
            'name': loadbalancer[constants.ID],
            'protocol': protocol,
            'external_ids': external_ids}
        if self._are_selection_fields_supported():
            kwargs['selection_fields'] = self._get_selection_keys(
                lb_algorithm)
        try:
            self.ovn_nbdb_api.db_create(
                'Load_Balancer', **kwargs).execute(check_error=True)
            ovn_lb = self._find_ovn_lbs(
                loadbalancer[constants.ID],
                protocol=protocol)
            ovn_lb = ovn_lb if protocol else ovn_lb[0]
            commands = self._update_lb_to_ls_association(
                ovn_lb, network_id=port['network_id'],
                associate=True)
            ls_name = utils.ovn_name(port['network_id'])
            ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
                check_error=True)
            ovn_lr = self._find_lr_of_ls(ovn_ls, subnet.get('gateway_ip'))
            if ovn_lr:
                commands.extend(self._update_lb_to_lr_association(
                    ovn_lb, ovn_lr))

            # NOTE(mjozefcz): In case LS references were passed -
            # apply LS to the new LB. That could happen in case we
            # need another loadbalancer for other L4 protocol.
            ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
            if ls_refs:
                try:
                    ls_refs = jsonutils.loads(ls_refs)
                except ValueError:
                    ls_refs = {}
                for ls in ls_refs:
                    # Skip previously added LS because we don't want
                    # to duplicate.
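                    # (Keys in ls_refs are OVN switch names of the form
                    # 'neutron-<network_id>'; the replace() below recovers
                    # the Neutron network UUID from them.)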
                    if ls == ovn_ls.name:
                        continue
                    commands.extend(self._update_lb_to_ls_association(
                        ovn_lb, network_id=ls.replace('neutron-', ''),
                        associate=True))

            self._execute_commands(commands)
            operating_status = constants.ONLINE
            # The issue is that since OVN doesn't support any HMs,
            # we ideally should never put the status as 'ONLINE'
            if not loadbalancer.get(constants.ADMIN_STATE_UP, True):
                operating_status = constants.OFFLINE
            status = {
                constants.LOADBALANCERS: [
                    {constants.ID: loadbalancer[constants.ID],
                     constants.PROVISIONING_STATUS: constants.ACTIVE,
                     constants.OPERATING_STATUS: operating_status}]}
        # If the connection with the OVN NB db server is broken, then
        # ovsdbapp will throw either TimeoutException or RuntimeError.
        # Maybe we can catch these specific exceptions.
        # It is important to report the status to octavia. We can report
        # immediately or reschedule the lb_create request later.
        # For now let's report immediately.
        except Exception:
            LOG.exception(ovn_const.EXCEPTION_MSG,
                          "creation of loadbalancer")
            # Any exception sets the status to ERROR
            if isinstance(port, dict):
                self.delete_vip_port(port.get('id'))
                LOG.warning("Deleting the VIP port %s since LB went into "
                            "ERROR state", str(port.get('id')))
            status = {
                constants.LOADBALANCERS: [
                    {constants.ID: loadbalancer[constants.ID],
                     constants.PROVISIONING_STATUS: constants.ERROR,
                     constants.OPERATING_STATUS: constants.ERROR}]}
        return status

    def lb_delete(self, loadbalancer):
        port_id = None
        lbalancer_status = {
            constants.ID: loadbalancer[constants.ID],
            constants.PROVISIONING_STATUS: constants.DELETED,
            constants.OPERATING_STATUS: constants.OFFLINE}
        status = {
            constants.LOADBALANCERS: [lbalancer_status],
            constants.LISTENERS: [],
            constants.POOLS: [],
            constants.MEMBERS: []}

        ovn_lbs = None
        try:
            ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID])
        except idlutils.RowNotFound:
            LOG.warning("Loadbalancer %s not found in OVN Northbound DB. "
                        "Setting the Loadbalancer status to DELETED "
                        "in Octavia", str(loadbalancer[constants.ID]))
            return status

        try:
            port_id = ovn_lbs[0].external_ids[
                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY]
            for ovn_lb in ovn_lbs:
                status = self._lb_delete(loadbalancer, ovn_lb, status)
            # Clear the status dict of any key having [] value.
            # Python 3.6 doesn't allow deleting an element in a
            # dict while iterating over it. So first get a list of keys.
            # https://cito.github.io/blog/never-iterate-a-changing-dict/
            status = {key: value for key, value in status.items() if value}
        except Exception:
            LOG.exception(ovn_const.EXCEPTION_MSG,
                          "deletion of loadbalancer")
            lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR
            lbalancer_status[constants.OPERATING_STATUS] = constants.ERROR

        # Delete VIP port from neutron.
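        # (port_id was captured from the first OVN row's external_ids
        # above; the Neutron VIP port is removed even when the OVN
        # cleanup ended in ERROR.)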
        self.delete_vip_port(port_id)
        return status

    def _lb_delete(self, loadbalancer, ovn_lb, status):
        commands = []
        if loadbalancer['cascade']:
            # Delete all pools
            for key, value in ovn_lb.external_ids.items():
                if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
                    pool_id = key.split('_')[1]
                    # Delete all members in the pool
                    if value and len(value.split(',')) > 0:
                        for mem_info in value.split(','):
                            status[constants.MEMBERS].append({
                                constants.ID: mem_info.split('_')[1],
                                constants.PROVISIONING_STATUS:
                                    constants.DELETED})
                    status[constants.POOLS].append(
                        {constants.ID: pool_id,
                         constants.PROVISIONING_STATUS: constants.DELETED})

                if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
                    status[constants.LISTENERS].append({
                        constants.ID: key.split('_')[1],
                        constants.PROVISIONING_STATUS: constants.DELETED,
                        constants.OPERATING_STATUS: constants.OFFLINE})

        ls_refs = ovn_lb.external_ids.get(
            ovn_const.LB_EXT_IDS_LS_REFS_KEY, {})
        if ls_refs:
            try:
                ls_refs = jsonutils.loads(ls_refs)
            except ValueError:
                ls_refs = {}
        for ls_name in ls_refs.keys():
            try:
                ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
                    check_error=True)
                commands.append(
                    self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid))
            except idlutils.RowNotFound:
                LOG.warning("LogicalSwitch %s could not be found. Cannot "
                            "delete Load Balancer from it", ls_name)
        # Delete LB from all Networks the LB is indirectly associated
        for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'):
            commands.append(
                self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid,
                                            if_exists=True))
        lr_ref = ovn_lb.external_ids.get(
            ovn_const.LB_EXT_IDS_LR_REF_KEY, {})
        if lr_ref:
            try:
                lr = self.ovn_nbdb_api.lookup('Logical_Router', lr_ref)
                commands.append(self.ovn_nbdb_api.lr_lb_del(
                    lr.uuid, ovn_lb.uuid))
            except idlutils.RowNotFound:
                pass
        # Delete LB from all Routers the LB is indirectly associated
        for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'):
            commands.append(
                self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid,
                                            if_exists=True))
        commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid))
        self._execute_commands(commands)
        return status

    def lb_update(self, loadbalancer):
        lb_status = {constants.ID: loadbalancer[constants.ID],
                     constants.PROVISIONING_STATUS: constants.ACTIVE}
        status = {constants.LOADBALANCERS: [lb_status]}
        if constants.ADMIN_STATE_UP not in loadbalancer:
            return status
        lb_enabled = loadbalancer[constants.ADMIN_STATE_UP]
        try:
            ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID])
            # It should be consistent across all the LBs for all
            # protocols, so we can just easily loop over all LBs defined
            # for the given Octavia LB.
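            # (Enable/disable is not modelled natively in OVN; flipping
            # external_ids['enabled'] and refreshing the vips map is what
            # actually starts or stops traffic, since _frame_vip_ips()
            # returns an empty map while the LB is disabled.)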
for ovn_lb in ovn_lbs: if str(ovn_lb.external_ids['enabled']) != str(lb_enabled): commands = [] enable_info = {'enabled': str(lb_enabled)} ovn_lb.external_ids['enabled'] = str(lb_enabled) commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', enable_info)) ) commands.extend( self._refresh_lb_vips(ovn_lb.uuid, ovn_lb.external_ids)) self._execute_commands(commands) if lb_enabled: operating_status = constants.ONLINE else: operating_status = constants.OFFLINE lb_status[constants.OPERATING_STATUS] = operating_status except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of loadbalancer") lb_status[constants.PROVISIONING_STATUS] = constants.ERROR lb_status[constants.OPERATING_STATUS] = constants.ERROR return status def listener_create(self, listener): ovn_lb = self._get_or_create_ovn_lb( listener[constants.LOADBALANCER_ID], listener[constants.PROTOCOL], listener[constants.ADMIN_STATE_UP]) external_ids = copy.deepcopy(ovn_lb.external_ids) listener_key = self._get_listener_key( listener[constants.ID], is_enabled=listener[constants.ADMIN_STATE_UP]) if listener.get(constants.DEFAULT_POOL_ID): pool_key = self._get_pool_key(listener[constants.DEFAULT_POOL_ID]) else: pool_key = '' external_ids[listener_key] = self._make_listener_key_value( listener[constants.PROTOCOL_PORT], pool_key) listener_info = {listener_key: external_ids[listener_key]} try: commands = [] commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', listener_info))) if not self._is_listener_in_lb(ovn_lb): commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('protocol', str(listener[constants.PROTOCOL]).lower()))) commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of listener") status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status operating_status = constants.ONLINE if not listener.get(constants.ADMIN_STATE_UP, True): operating_status = constants.OFFLINE if (ovn_lb.health_check and not self._update_hm_vip(ovn_lb, listener[constants.PROTOCOL_PORT])): operating_status = constants.ERROR status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def listener_delete(self, listener): status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: constants.OFFLINE}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} try: ovn_lb = self._find_ovn_lbs( listener[constants.LOADBALANCER_ID], protocol=listener[constants.PROTOCOL]) except idlutils.RowNotFound: # Listener already deleted. 
return status external_ids = copy.deepcopy(ovn_lb.external_ids) listener_key = self._get_listener_key(listener[constants.ID]) if listener_key in external_ids: try: commands = [] commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (listener_key))) # Drop current listener from LB. del external_ids[listener_key] # Set LB protocol to undefined only if there are no more # listeners and pools defined in the LB. cmds, lb_to_delete = self._clean_lb_if_empty( ovn_lb, listener[constants.LOADBALANCER_ID], external_ids) commands.extend(cmds) # Do not refresh vips if OVN LB for given protocol # has pending delete operation. if not lb_to_delete: commands.extend( self._refresh_lb_vips(ovn_lb.uuid, external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of listener") status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def listener_update(self, listener): # NOTE(mjozefcz): Based on # https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=update-a-listener-detail # there is no possibility to update listener protocol or port. listener_status = {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} lbalancer_status = { constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE} pool_status = [] status = { constants.LISTENERS: [listener_status], constants.LOADBALANCERS: [lbalancer_status], constants.POOLS: pool_status} try: ovn_lb = self._find_ovn_lbs( listener[constants.LOADBALANCER_ID], protocol=listener[constants.PROTOCOL]) except idlutils.RowNotFound: LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") # LB row not found during update of a listener. That is a problem. listener_status[constants.PROVISIONING_STATUS] = constants.ERROR lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR return status l_key_when_enabled = self._get_listener_key(listener[constants.ID]) l_key_when_disabled = self._get_listener_key( listener[constants.ID], is_enabled=False) external_ids = copy.deepcopy(ovn_lb.external_ids) if constants.ADMIN_STATE_UP not in listener and ( constants.DEFAULT_POOL_ID not in listener): return status l_key_to_add = {} if l_key_when_enabled in external_ids: present_l_key = l_key_when_enabled elif l_key_when_disabled in external_ids: present_l_key = l_key_when_disabled else: # Something is terribly wrong. This cannot happen. 
return status try: commands = [] new_l_key = None l_key_to_remove = None if constants.ADMIN_STATE_UP in listener: if listener[constants.ADMIN_STATE_UP]: # We need to enable the listener new_l_key = l_key_when_enabled listener_status[constants.OPERATING_STATUS] = ( constants.ONLINE) else: # We need to disable the listener new_l_key = l_key_when_disabled listener_status[constants.OPERATING_STATUS] = ( constants.OFFLINE) if present_l_key != new_l_key: external_ids[new_l_key] = external_ids[present_l_key] l_key_to_add[new_l_key] = external_ids[present_l_key] del external_ids[present_l_key] l_key_to_remove = present_l_key if l_key_to_remove: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (l_key_to_remove))) else: new_l_key = present_l_key if constants.DEFAULT_POOL_ID in listener: pool_key = self._get_pool_key( listener[constants.DEFAULT_POOL_ID]) l_key_value = self._make_listener_key_value( listener[constants.PROTOCOL_PORT], pool_key) l_key_to_add[new_l_key] = l_key_value external_ids[new_l_key] = l_key_value pool_status.append( {constants.ID: listener[constants.DEFAULT_POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}) if l_key_to_add: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', l_key_to_add))) commands.extend( self._refresh_lb_vips(ovn_lb.uuid, external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def pool_create(self, pool): ovn_lb = self._get_or_create_ovn_lb( pool[constants.LOADBALANCER_ID], pool[constants.PROTOCOL], pool[constants.ADMIN_STATE_UP], lb_algorithm=pool[constants.LB_ALGORITHM]) external_ids = copy.deepcopy(ovn_lb.external_ids) pool_key = self._get_pool_key( pool[constants.ID], is_enabled=pool[constants.ADMIN_STATE_UP]) external_ids[pool_key] = '' if pool[constants.LISTENER_ID]: listener_key = self._get_listener_key(pool[constants.LISTENER_ID]) if listener_key in ovn_lb.external_ids: external_ids[listener_key] = str( external_ids[listener_key]) + str(pool_key) try: self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', external_ids)).execute(check_error=True) # Pool status will be set to Online after a member is added to it # or when it is created with listener. 
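        # Illustrative external_ids layout after this call (the literal
        # 'pool_'/'listener_' prefixes are assumed here; the real values
        # come from the constants module):
        #   pool_<pool-id>: ''                       <- no members yet
        #   listener_<listener-id>: '80:pool_<pool-id>'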
operating_status = constants.OFFLINE if pool[constants.LISTENER_ID]: operating_status = constants.ONLINE status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} if pool[constants.LISTENER_ID]: listener_status = [ {constants.ID: pool[constants.LISTENER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}] status[constants.LISTENERS] = listener_status except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of pool") status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} if pool[constants.LISTENER_ID]: listener_status = [ {constants.ID: pool[constants.LISTENER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}] status[constants.LISTENERS] = listener_status return status def pool_delete(self, pool): status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} try: ovn_lb = self._find_ovn_lbs( pool[constants.LOADBALANCER_ID], pool[constants.PROTOCOL]) except idlutils.RowNotFound: # LB row not found that means pool is deleted. return status pool_key = self._get_pool_key(pool[constants.ID]) commands = [] external_ids = copy.deepcopy(ovn_lb.external_ids) pool_listeners = [] try: pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) if pool_key in ovn_lb.external_ids: commands.append( self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, 'external_ids', (pool_key))) del external_ids[pool_key] commands.extend( self._refresh_lb_vips(ovn_lb.uuid, external_ids)) # Remove Pool from Listener if it is associated for key, value in ovn_lb.external_ids.items(): if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and pool_key in value): external_ids[key] = value.split(':')[0] + ':' commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', external_ids))) pool_key_when_disabled = self._get_pool_key(pool[constants.ID], is_enabled=False) if pool_key_when_disabled in ovn_lb.external_ids: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (pool_key_when_disabled))) commands.extend( self._clean_lb_if_empty( ovn_lb, pool[constants.LOADBALANCER_ID], external_ids)[0]) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of pool") status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status return status def pool_update(self, pool): pool_status = {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} lbalancer_status = {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE} status = { constants.POOLS: [pool_status], constants.LOADBALANCERS: 
[lbalancer_status]} if constants.ADMIN_STATE_UP not in pool: return status try: ovn_lb = self._find_ovn_lbs( pool[constants.LOADBALANCER_ID], protocol=pool[constants.PROTOCOL]) except idlutils.RowNotFound: LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool") # LB row not found during update of a listener. That is a problem. pool_status[constants.PROVISIONING_STATUS] = constants.ERROR lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR return status pool_key = self._get_pool_key(pool[constants.ID]) p_key_when_disabled = self._get_pool_key(pool[constants.ID], is_enabled=False) external_ids = copy.deepcopy(ovn_lb.external_ids) p_key_to_remove = None p_key_to_add = {} pool_listeners = [] try: pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) if pool[constants.ADMIN_STATE_UP]: if p_key_when_disabled in external_ids: p_key_to_add[pool_key] = external_ids[p_key_when_disabled] external_ids[pool_key] = external_ids[p_key_when_disabled] del external_ids[p_key_when_disabled] p_key_to_remove = p_key_when_disabled else: if pool_key in external_ids: p_key_to_add[p_key_when_disabled] = external_ids[pool_key] external_ids[p_key_when_disabled] = external_ids[pool_key] del external_ids[pool_key] p_key_to_remove = pool_key if p_key_to_remove: commands = [] commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (p_key_to_remove))) commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', p_key_to_add))) commands.extend( self._refresh_lb_vips(ovn_lb.uuid, external_ids)) self._execute_commands(commands) if pool[constants.ADMIN_STATE_UP]: operating_status = constants.ONLINE else: operating_status = constants.OFFLINE pool_status[constants.OPERATING_STATUS] = operating_status except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool") status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status return status def _add_member(self, member, ovn_lb, pool_key): external_ids = copy.deepcopy(ovn_lb.external_ids) existing_members = external_ids[pool_key] if existing_members: existing_members = existing_members.split(",") member_info = self._get_member_info(member) if member_info in existing_members: # Member already present return None if existing_members: existing_members.append(member_info) pool_data = {pool_key: ",".join(existing_members)} else: pool_data = {pool_key: member_info} commands = [] commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', pool_data))) external_ids[pool_key] = pool_data[pool_key] commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) subnet_id = member[constants.SUBNET_ID] commands.extend( self._update_lb_to_ls_association( ovn_lb, subnet_id=subnet_id, associate=True)) # Make sure that all logical switches related to logical router # are associated with the load balancer. This is needed to handle # potential race that happens when lrp and lb are created at the # same time. 
neutron_client = clients.get_neutron_client() try: subnet = neutron_client.show_subnet(subnet_id) ls_name = utils.ovn_name(subnet['subnet']['network_id']) ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) ovn_lr = self._find_lr_of_ls( ovn_ls, subnet['subnet'].get('gateway_ip')) if ovn_lr: commands.extend(self._update_lb_to_lr_association( ovn_lb, ovn_lr)) except n_exc.NotFound: pass except idlutils.RowNotFound: pass self._execute_commands(commands) return member_info def member_create(self, member): new_member = None pool_listeners = [] try: pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( member[constants.POOL_ID]) pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) new_member = self._add_member(member, ovn_lb, pool_key) pool = {constants.ID: member[constants.POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: constants.ONLINE} member_status = {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} if not member[constants.ADMIN_STATE_UP]: member_status[constants.OPERATING_STATUS] = constants.OFFLINE status = { constants.POOLS: [pool], constants.MEMBERS: [member_status], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}]} except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of member") status = { constants.POOLS: [ {constants.ID: member[constants.POOL_ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.MEMBERS: [ {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status if new_member and ovn_lb.health_check: operating_status = constants.ONLINE if not self._update_hm_members(ovn_lb, pool_key): operating_status = constants.ERROR member_status[constants.OPERATING_STATUS] = operating_status return status def _remove_member(self, member, ovn_lb, pool_key): external_ids = copy.deepcopy(ovn_lb.external_ids) existing_members = external_ids[pool_key].split(",") member_info = self._get_member_info(member) if member_info in existing_members: commands = [] existing_members.remove(member_info) if not existing_members: pool_status = constants.OFFLINE else: pool_status = constants.ONLINE pool_data = {pool_key: ",".join(existing_members)} commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', pool_data))) external_ids[pool_key] = ",".join(existing_members) commands.extend( self._refresh_lb_vips(ovn_lb.uuid, external_ids)) commands.extend( self._update_lb_to_ls_association( ovn_lb, subnet_id=member.get(constants.SUBNET_ID), associate=False)) self._execute_commands(commands) return pool_status else: msg = f"Member {member[constants.ID]} not found in the pool" raise driver_exceptions.DriverError( user_fault_string=msg, operator_fault_string=msg) def member_delete(self, member): pool_listeners = [] try: pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( member[constants.POOL_ID]) pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) pool_status = self._remove_member(member, ovn_lb, pool_key) pool = {constants.ID: member[constants.POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: pool_status} if pool_status == constants.ONLINE and 
ovn_lb.health_check: self._update_hm_members(ovn_lb, pool_key) status = { constants.POOLS: [pool], constants.MEMBERS: [ {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED}], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}]} except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of member") status = { constants.POOLS: [ {constants.ID: member[constants.POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.MEMBERS: [ {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status return status def _update_member(self, member, ovn_lb, pool_key): commands = [] external_ids = copy.deepcopy(ovn_lb.external_ids) existing_members = external_ids[pool_key].split(",") member_info = self._get_member_info(member) for mem in existing_members: if (member_info.split('_')[1] == mem.split('_')[1] and mem != member_info): existing_members.remove(mem) existing_members.append(member_info) pool_data = {pool_key: ",".join(existing_members)} commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', pool_data))) external_ids[pool_key] = ",".join(existing_members) commands.extend( self._refresh_lb_vips(ovn_lb.uuid, external_ids)) self._execute_commands(commands) def member_update(self, member): pool_listeners = [] try: pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( member[constants.POOL_ID]) member_status = {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} status = { constants.POOLS: [ {constants.ID: member[constants.POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.MEMBERS: [member_status], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}]} pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) self._update_member(member, ovn_lb, pool_key) if constants.ADMIN_STATE_UP in member: if member[constants.ADMIN_STATE_UP]: old_admin_state_up = member.get('old_admin_state_up') if old_admin_state_up is None: exist_member = self._octavia_driver_lib.get_member( member[constants.ID]) if exist_member: old_admin_state_up = exist_member.admin_state_up if old_admin_state_up: member_status[constants.OPERATING_STATUS] = ( constants.ONLINE) else: # going from down to up should reflect NO_MONITOR state member_status[constants.OPERATING_STATUS] = ( constants.NO_MONITOR) else: member_status[constants.OPERATING_STATUS] = ( constants.OFFLINE) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of member") status = { constants.POOLS: [ {constants.ID: member[constants.POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.MEMBERS: [ {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status return status def _get_existing_pool_members(self, pool_id): pool_key, ovn_lb = 
        self._find_ovn_lb_by_pool_id(pool_id)
        if not ovn_lb:
            msg = _("Loadbalancer with pool %s does not exist") % pool_key
            raise driver_exceptions.DriverError(msg)
        external_ids = dict(ovn_lb.external_ids)
        return external_ids[pool_key]

    def get_pool_member_id(self, pool_id, mem_addr_port=None):
        '''Gets Member information

        :param pool_id: ID of the Pool whose member information is
                        required.
        :param mem_addr_port: Combination of Member Address+Port.
                              Default=None
        :returns: UUID -- ID of the Member if member exists in pool.
        :returns: None -- if no member exists in the pool
        :raises: Exception if Loadbalancer is not found for a Pool ID
        '''
        existing_members = self._get_existing_pool_members(pool_id)
        # Members are saved in OVN in the form of
        # member1_UUID_IP:Port, member2_UUID_IP:Port
        # Match the IP:Port for all members with the mem_addr_port
        # information and return the UUID.
        for meminf in existing_members.split(','):
            if mem_addr_port == meminf.split('_')[2]:
                return meminf.split('_')[1]

    def create_vip_port(self, project_id, lb_id, vip_d):
        port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
                         'network_id': vip_d[constants.VIP_NETWORK_ID],
                         'fixed_ips': [{
                             'subnet_id': vip_d['vip_subnet_id']}],
                         'admin_state_up': True,
                         'project_id': project_id}}
        try:
            port['port']['fixed_ips'][0]['ip_address'] = (
                vip_d[constants.VIP_ADDRESS])
        except KeyError:
            pass
        neutron_client = clients.get_neutron_client()
        try:
            return neutron_client.create_port(port)
        except n_exc.IpAddressAlreadyAllocatedClient as e:
            # Sometimes the VIP is already created (race conditions).
            # Let's get it from the Neutron API.
            ports = neutron_client.list_ports(
                network_id=vip_d[constants.VIP_NETWORK_ID],
                name=f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}')
            if not ports['ports']:
                LOG.error('Cannot create/get LoadBalancer VIP port with '
                          'fixed IP: %s', vip_d[constants.VIP_ADDRESS])
                raise e
            # there should only be one port returned
            port = ports['ports'][0]
            LOG.debug('VIP Port already exists, uuid: %s', port['id'])
            return {'port': port}

    def delete_vip_port(self, port_id):
        neutron_client = clients.get_neutron_client()
        try:
            neutron_client.delete_port(port_id)
        except n_exc.PortNotFoundClient:
            LOG.warning("Port %s could not be found. Please "
                        "check Neutron logs. Perhaps port "
                        "was already deleted.", port_id)

    def handle_vip_fip(self, fip_info):
        ovn_lb = fip_info['ovn_lb']
        external_ids = copy.deepcopy(ovn_lb.external_ids)
        commands = []

        if fip_info['action'] == ovn_const.REQ_INFO_ACTION_ASSOCIATE:
            external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = (
                fip_info['vip_fip'])
            vip_fip_info = {
                ovn_const.LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']}
            commands.append(
                self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
                                         ('external_ids', vip_fip_info)))
        else:
            external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
            commands.append(
                self.ovn_nbdb_api.db_remove(
                    'Load_Balancer', ovn_lb.uuid, 'external_ids',
                    (ovn_const.LB_EXT_IDS_VIP_FIP_KEY)))

        commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids))
        self._execute_commands(commands)

    def handle_member_dvr(self, info):
        pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(info['pool_id'])
        if not ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY):
            LOG.debug("LB %(lb)s has no FIP on VIP configured. "
                      "There is no need to centralize member %(member)s "
                      "traffic.",
                      {'lb': ovn_lb.uuid, 'member': info['id']})
            return

        # Find out if member has FIP assigned.
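        # (If the VIP has a FIP and the member also has a FIP, the
        # member's NAT entry must lose its external_mac/logical_port so
        # member traffic is centralized; the branches below implement
        # exactly that and undo it again on member removal.)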
neutron_client = clients.get_neutron_client() try: subnet = neutron_client.show_subnet(info['subnet_id']) ls_name = utils.ovn_name(subnet['subnet']['network_id']) except n_exc.NotFound: LOG.exception('Subnet %s not found while trying to ' 'fetch its data.', info['subnet_id']) return try: ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name) except idlutils.RowNotFound: LOG.warning("Logical Switch %s not found. " "Cannot verify member FIP configuration.", ls_name) return fip = None f = utils.remove_macs_from_lsp_addresses for port in ls.ports: if info['address'] in f(port.addresses): # We found particular port fip = self.ovn_nbdb_api.db_find_rows( 'NAT', ('external_ids', '=', { ovn_const.OVN_FIP_PORT_EXT_ID_KEY: port.name}) ).execute(check_error=True) fip = fip[0] if fip else fip break if not fip: LOG.debug('Member %s has no FIP assigned. ' 'There is no need to modify its NAT.', info['id']) return if info['action'] == ovn_const.REQ_INFO_MEMBER_ADDED: LOG.info('Member %(member)s is added to Load Balancer %(lb)s ' 'and both have FIP assigned. Member FIP %(fip)s ' 'needs to be centralized in those conditions. ' 'Deleting external_mac/logical_port from it.', {'member': info['id'], 'lb': ovn_lb.uuid, 'fip': fip.external_ip}) self.ovn_nbdb_api.db_clear( 'NAT', fip.uuid, 'external_mac').execute(check_error=True) self.ovn_nbdb_api.db_clear( 'NAT', fip.uuid, 'logical_port').execute(check_error=True) else: LOG.info('Member %(member)s is deleted from Load Balancer ' '%(lb)s and both have FIP assigned. Member FIP %(fip)s ' 'can be decentralized now if environment has DVR ' 'enabled. Updating FIP object for recomputation.', {'member': info['id'], 'lb': ovn_lb.uuid, 'fip': fip.external_ip}) # NOTE(mjozefcz): We don't know if this env is DVR or not. # We should call neutron API to do 'empty' update of the FIP. # It will bump revision number and do recomputation of the FIP. try: fip_info = neutron_client.show_floatingip( fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY]) empty_update = { "floatingip": { 'description': fip_info['floatingip']['description']}} neutron_client.update_floatingip( fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY], empty_update) except n_exc.NotFound: LOG.warning('Member %(member)s FIP %(fip)s not found in ' 'Neutron. 
Cannot update it.', {'member': info['id'], 'fip': fip.external_ip}) def _get_member_lsp(self, member_ip, member_subnet_id): neutron_client = clients.get_neutron_client() try: member_subnet = neutron_client.show_subnet(member_subnet_id) except n_exc.NotFound: LOG.exception('Subnet %s not found while trying to ' 'fetch its data.', member_subnet_id) return ls_name = utils.ovn_name(member_subnet['subnet']['network_id']) try: ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name) except idlutils.RowNotFound: LOG.warning("Logical Switch %s not found.", ls_name) return f = utils.remove_macs_from_lsp_addresses for port in ls.ports: if member_ip in f(port.addresses): # We found the matching port return port def _add_hm(self, ovn_lb, pool_key, info): hm_id = info[constants.ID] status = {constants.ID: hm_id, constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR} # Example # MONITOR_PRT = 80 # ID=$(ovn-nbctl --bare --column _uuid find # Load_Balancer_Health_Check vip="${LB_VIP_ADDR}\:${MONITOR_PRT}") # In our case the monitor port will be the member's protocol port vip = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY) if not vip: LOG.error("Could not find VIP for HM %s, LB external_ids: %s", hm_id, ovn_lb.external_ids) return status vip_port = self._get_pool_listener_port(ovn_lb, pool_key) if not vip_port: # This is not fatal as we can add it when a listener is created vip = [] else: vip = vip + ':' + vip_port # ovn-nbctl --wait=sb -- # set Load_Balancer_Health_Check ${ID} options:\"interval\"=6 -- # set Load_Balancer_Health_Check ${ID} options:\"timeout\"=2 -- # set Load_Balancer_Health_Check ${ID} options:\"success_count\"=1 -- # set Load_Balancer_Health_Check ${ID} options:\"failure_count\"=3 options = { 'interval': str(info['interval']), 'timeout': str(info['timeout']), 'success_count': str(info['success_count']), 'failure_count': str(info['failure_count'])} # This is to enable lookups by Octavia DB ID value external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: hm_id} # Just seems like this needs ovsdbapp support, see: # ovsdbapp/schema/ovn_northbound/impl_idl.py - lb_add() # ovsdbapp/schema/ovn_northbound/commands.py - LbAddCommand() # then this could just be self.ovn_nbdb_api.lb_hm_add() kwargs = { 'vip': vip, 'options': options, 'external_ids': external_ids} operating_status = constants.ONLINE if not info['admin_state_up']: operating_status = constants.OFFLINE try: with self.ovn_nbdb_api.transaction(check_error=True) as txn: health_check = txn.add( self.ovn_nbdb_api.db_create( 'Load_Balancer_Health_Check', **kwargs)) txn.add(self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, 'health_check', health_check)) status = {constants.ID: hm_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status} except Exception: # Any Exception will return ERROR status LOG.exception(ovn_const.EXCEPTION_MSG, "set of health check") return status def _update_hm_vip(self, ovn_lb, vip_port): hm = self._lookup_hm_by_id(ovn_lb.health_check) if not hm: LOG.error("Could not find HM with key: %s", ovn_lb.health_check) return False vip = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY) if not vip: LOG.error("Could not find VIP for HM %s, LB external_ids: %s", hm.uuid, ovn_lb.external_ids) return False vip = vip + ':' + str(vip_port) commands = [] commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', hm.uuid, ('vip', vip))) self._execute_commands(commands) return True def _update_hm_members(self, ovn_lb, pool_key): mappings = 
{} # For each member, set its HM for member_ip, member_port, member_subnet in self._extract_member_info( ovn_lb.external_ids[pool_key]): member_lsp = self._get_member_lsp(member_ip, member_subnet) if not member_lsp: LOG.error("Member %(member)s Logical_Switch_Port not found. " "Cannot create a Health Monitor for pool %(pool)s.", {'member': member_ip, 'pool': pool_key}) return False network_id = member_lsp.external_ids.get( ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1] hm_port = self._ensure_hm_ovn_port(network_id) if not hm_port: LOG.error("No port on network %(network)s available for " "health monitoring. Cannot create a Health Monitor " "for pool %(pool)s.", {'network': network_id, 'pool': pool_key}) return False hm_source_ip = None for fixed_ip in hm_port['fixed_ips']: if fixed_ip['subnet_id'] == member_subnet: hm_source_ip = fixed_ip['ip_address'] break if not hm_source_ip: LOG.error("No port on subnet %(subnet)s available for " "health monitoring member IP %(member)s. Cannot " "create a Health Monitor for pool %(pool)s.", {'subnet': member_subnet, 'member': member_ip, 'pool': pool_key}) return False # ovn-nbctl set load_balancer ${OVN_LB_ID} # ip_port_mappings:${MEMBER_IP}=${LSP_NAME_MEMBER}:${HEALTH_SRC} # where: # OVN_LB_ID: id of LB # MEMBER_IP: IP of member_lsp # HEALTH_SRC: source IP of hm_port # need output like this # vips: {"172.24.4.246:80"="10.0.0.10:80"} # ip_port_mappings: {"10.0.0.10"="ID:10.0.0.2"} # ip_port_mappings: {"MEMBER_IP"="LSP_NAME_MEMBER:HEALTH_SRC"} # OVN does not support IPv6 Health Checks, but we check anyway member_src = f'{member_lsp.name}:' if netaddr.IPNetwork(hm_source_ip).version == 6: member_src += f'[{hm_source_ip}]' else: member_src += f'{hm_source_ip}' if netaddr.IPNetwork(member_ip).version == 6: member_ip = f'[{member_ip}]' mappings[member_ip] = member_src commands = [] commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('ip_port_mappings', mappings))) self._execute_commands(commands) return True def _lookup_hm_by_id(self, hm_id): hms = self.ovn_nbdb_api.db_list_rows( 'Load_Balancer_Health_Check').execute(check_error=True) for hm in hms: if (ovn_const.LB_EXT_IDS_HM_KEY in hm.external_ids and hm.external_ids[ovn_const.LB_EXT_IDS_HM_KEY] == hm_id): return hm raise idlutils.RowNotFound(table='Load_Balancer_Health_Check', col='external_ids', match=hm_id) def _lookup_lb_by_hm_id(self, hm_id): lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', ('health_check', '=', [hm_id])).execute() return lbs[0] if lbs else None def _find_ovn_lb_from_hm_id(self, hm_id): try: hm = self._lookup_hm_by_id(hm_id) except idlutils.RowNotFound: LOG.debug("Loadbalancer health monitor %s not found!", hm_id) return None, None try: ovn_lb = self._lookup_lb_by_hm_id(hm.uuid) except idlutils.RowNotFound: LOG.debug("Loadbalancer not found with health_check %s!", hm.uuid) return hm, None return hm, ovn_lb def hm_create(self, info): status = { constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: constants.NO_MONITOR, constants.PROVISIONING_STATUS: constants.ERROR}]} pool_id = info[constants.POOL_ID] pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id) if not ovn_lb: LOG.debug("Could not find LB with pool id %s", pool_id) return status status[constants.LOADBALANCERS] = [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}] if pool_key not in ovn_lb.external_ids: # Returning early here will cause the pool to go into # PENDING_UPDATE state, which is not good LOG.error("Could 
not find pool with key %s, LB external_ids: %s", pool_key, ovn_lb.external_ids) status[constants.POOLS] = [ {constants.ID: pool_id, constants.OPERATING_STATUS: constants.OFFLINE}] return status status[constants.POOLS] = [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: constants.ONLINE}] # Update status for all members in the pool member_status = [] existing_members = ovn_lb.external_ids[pool_key] if len(existing_members) > 0: for mem_info in existing_members.split(','): member_status.append({ constants.ID: mem_info.split('_')[1], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: constants.ONLINE}) status[constants.MEMBERS] = member_status # MONITOR_PRT = 80 # ovn-nbctl --wait=sb -- --id=@hc create Load_Balancer_Health_Check # vip="${LB_VIP_ADDR}\:${MONITOR_PRT}" -- add Load_Balancer # ${OVN_LB_ID} health_check @hc # options here are interval, timeout, failure_count and success_count # from info object passed-in hm_status = self._add_hm(ovn_lb, pool_key, info) if hm_status[constants.PROVISIONING_STATUS] == constants.ACTIVE: if not self._update_hm_members(ovn_lb, pool_key): hm_status[constants.PROVISIONING_STATUS] = constants.ERROR hm_status[constants.OPERATING_STATUS] = constants.ERROR status[constants.HEALTHMONITORS] = [hm_status] return status def hm_update(self, info): status = { constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: constants.ERROR, constants.PROVISIONING_STATUS: constants.ERROR}]} hm_id = info[constants.ID] pool_id = info[constants.POOL_ID] hm, ovn_lb = self._find_ovn_lb_from_hm_id(hm_id) if not hm: LOG.debug("Loadbalancer health monitor %s not found!", hm_id) return status if not ovn_lb: LOG.debug("Could not find LB with health monitor id %s", hm_id) # Do we really need to try this hard? pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id) if not ovn_lb: LOG.debug("Could not find LB with pool id %s", pool_id) return status options = { 'interval': str(info['interval']), 'timeout': str(info['timeout']), 'success_count': str(info['success_count']), 'failure_count': str(info['failure_count'])} commands = [] commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', hm.uuid, ('options', options))) self._execute_commands(commands) operating_status = constants.ONLINE if not info['admin_state_up']: operating_status = constants.OFFLINE status = { constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.POOLS: [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: operating_status, constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def hm_delete(self, info): hm_id = info[constants.ID] status = { constants.HEALTHMONITORS: [ {constants.ID: hm_id, constants.OPERATING_STATUS: constants.NO_MONITOR, constants.PROVISIONING_STATUS: constants.DELETED}]} hm, ovn_lb = self._find_ovn_lb_from_hm_id(hm_id) if not hm or not ovn_lb: LOG.debug("Loadbalancer Health Check %s not found in OVN " "Northbound DB. 
Setting the Loadbalancer Health " "Monitor status to DELETED in Octavia", hm_id) return status # Need to send pool info in status update to avoid immutable objects, # the LB should have this info pool_id = None for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_POOL_PREFIX in k: pool_id = k.split('_')[1] break # ovn-nbctl clear load_balancer ${OVN_LB_ID} ip_port_mappings # ovn-nbctl clear load_balancer ${OVN_LB_ID} health_check # TODO(haleyb) remove just the ip_port_mappings for this hm commands = [] commands.append( self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid, 'ip_port_mappings')) commands.append( self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, 'health_check', hm.uuid)) commands.append( self.ovn_nbdb_api.db_destroy('Load_Balancer_Health_Check', hm.uuid)) self._execute_commands(commands) status = { constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: constants.NO_MONITOR, constants.PROVISIONING_STATUS: constants.DELETED}]} if pool_id: status[constants.POOLS] = [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE}] else: LOG.warning('Pool not found for load balancer %s, status ' 'update will have incomplete data', ovn_lb.name) return status def _get_lb_on_hm_event(self, row): """Get the Load Balancer information on a health_monitor event This function is called when the status of a member has been updated. Input: Service_Monitor row coming from ServiceMonitorUpdateEvent. Output: A row from the Load_Balancer table matching the member for which the event was generated. Exception: RowNotFound exception can be generated. """ # ip_port_mappings: {"MEMBER_IP"="LSP_NAME_MEMBER:HEALTH_SRC"} # There could be more than one entry in ip_port_mappings! mappings = {} hm_source_ip = str(row.src_ip) member_ip = str(row.ip) member_src = f'{row.logical_port}:' if netaddr.IPNetwork(hm_source_ip).version == 6: member_src += f'[{hm_source_ip}]' else: member_src += f'{hm_source_ip}' if netaddr.IPNetwork(member_ip).version == 6: member_ip = f'[{member_ip}]' mappings[member_ip] = member_src lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', (('ip_port_mappings', '=', mappings), ('protocol', '=', row.protocol))).execute() return lbs[0] if lbs else None def hm_update_event_handler(self, row): try: ovn_lb = self._get_lb_on_hm_event(row) except idlutils.RowNotFound: LOG.debug("Load balancer information not found") return if not ovn_lb: LOG.debug("Load balancer not found") return if row.protocol != ovn_lb.protocol: LOG.debug('Row protocol (%s) does not match LB protocol (%s)', row.protocol, ovn_lb.protocol) return request_info = {'ovn_lb': ovn_lb, 'ip': row.ip, 'port': str(row.port), 'status': row.status} self.add_request({'type': ovn_const.REQ_TYPE_HM_UPDATE_EVENT, 'info': request_info}) def _get_new_operating_statuses(self, ovn_lb, pool_id, member_id, member_status): # When a member's operating status changes, we have to determine # the correct operating_status to report back to Octavia. 
# For example: # # LB with Pool and 2 members # # member-1 goes offline # member-1 operating_status is ERROR # if Pool operating_status is ONLINE # Pool operating_status is DEGRADED # if LB operating_status is ONLINE # LB operating_status is DEGRADED # # member-2 then goes offline # member-2 operating_status is ERROR # Pool operating_status is ERROR # LB operating_status is ERROR # # The opposite would also have to happen. # # If there is only one member, the Pool and LB will reflect # the same status operating_status = member_status # Assume the best pool_status = constants.ONLINE lb_status = constants.ONLINE pool = self._octavia_driver_lib.get_pool(pool_id) if pool: pool_status = pool.operating_status lb = self._octavia_driver_lib.get_loadbalancer(ovn_lb.name) if lb: lb_status = lb.operating_status for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k: continue lb_pool_id = k.split('_')[1] if lb_pool_id != pool_id: continue existing_members = v.split(",") for mem in existing_members: # Ignore the passed member ID, we already know its status mem_id = mem.split('_')[1] if mem_id != member_id: member = self._octavia_driver_lib.get_member(mem_id) # If the statuses are different it is degraded if member and member.operating_status != member_status: operating_status = constants.DEGRADED break # operating_status will either be ONLINE, ERROR or DEGRADED if operating_status == constants.ONLINE: if pool_status != constants.ONLINE: pool_status = constants.ONLINE if lb_status != constants.ONLINE: lb_status = constants.ONLINE elif operating_status == constants.ERROR: if pool_status == constants.ONLINE: pool_status = constants.ERROR if lb_status == constants.ONLINE: lb_status = constants.ERROR else: if pool_status == constants.ONLINE: pool_status = constants.DEGRADED if lb_status == constants.ONLINE: lb_status = constants.DEGRADED return lb_status, pool_status def hm_update_event(self, info): ovn_lb = info['ovn_lb'] # Lookup pool and member pool_id = None member_id = None for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k: continue for member_ip, member_port, subnet in self._extract_member_info(v): if info['ip'] != member_ip: continue if info['port'] != member_port: continue # match pool_id = k.split('_')[1] member_id = v.split('_')[1] break # found it in inner loop if member_id: break if not member_id: LOG.warning('Member for event not found, info: %s', info) return member_status = constants.ONLINE if info['status'] == ['offline']: member_status = constants.ERROR lb_status, pool_status = self._get_new_operating_statuses( ovn_lb, pool_id, member_id, member_status) status = { constants.POOLS: [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: pool_status}], constants.MEMBERS: [ {constants.ID: member_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: member_status}], constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: lb_status}]} return status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/i18n.py0000664000175000017500000000140300000000000023163 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n as i18n _translators = i18n.TranslatorFactory(domain='octavia') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4525883 ovn-octavia-provider-2.0.0/ovn_octavia_provider/ovsdb/0000775000175000017500000000000000000000000023151 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/ovsdb/impl_idl_ovn.py0000664000175000017500000002340200000000000026177 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import contextlib from neutron_lib import exceptions as n_exc from oslo_log import log from ovsdbapp.backend import ovs_idl from ovsdbapp.backend.ovs_idl import command from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.backend.ovs_idl import rowview from ovsdbapp.backend.ovs_idl import transaction as idl_trans from ovsdbapp.schema.ovn_northbound import impl_idl as nb_impl_idl from ovsdbapp.schema.ovn_southbound import impl_idl as sb_impl_idl import tenacity from ovn_octavia_provider.common import config from ovn_octavia_provider.common import exceptions as ovn_exc from ovn_octavia_provider.i18n import _ from ovn_octavia_provider.ovsdb import impl_idl_ovn from ovn_octavia_provider.ovsdb import ovsdb_monitor config.register_opts() LOG = log.getLogger(__name__) class OvnNbTransaction(idl_trans.Transaction): def __init__(self, *args, **kwargs): # NOTE(lucasagomes): The bump_nb_cfg parameter is only used by # the agents' health status check self.bump_nb_cfg = kwargs.pop('bump_nb_cfg', False) super().__init__(*args, **kwargs) def pre_commit(self, txn): if not self.bump_nb_cfg: return self.api.nb_global.increment('nb_cfg') # This version of Backend doesn't use a class variable for ovsdb_connection # and therefore allows networking-ovn to manage connection scope on its own class Backend(ovs_idl.Backend): lookup_table = {} ovsdb_connection = None def __init__(self, connection): self.ovsdb_connection = connection super().__init__(connection) def start_connection(self, connection): try: self.ovsdb_connection.start() except Exception as e: connection_exception = OvsdbConnectionUnavailable( db_schema=self.schema, error=e) LOG.exception(connection_exception) raise connection_exception from e @property def idl(self): return self.ovsdb_connection.idl @property def tables(self): return self.idl.tables _tables = tables def is_table_present(self, 
table_name): return table_name in self._tables def is_col_present(self, table_name, col_name): return self.is_table_present(table_name) and ( col_name in self._tables[table_name].columns) def create_transaction(self, check_error=False, log_errors=True): return idl_trans.Transaction( self, self.ovsdb_connection, self.ovsdb_connection.timeout, check_error, log_errors) # Check for a column match in the table. If not found, do a retry with # a stop delay of 10 secs. This function would be useful if the caller # wants to verify the presence of a particular row in the table # with the column match before doing any transaction. # E.g. we can check if a Logical_Switch row is present before adding a # logical switch port to it. @tenacity.retry(retry=tenacity.retry_if_exception_type(RuntimeError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def check_for_row_by_value_and_retry(self, table, column, match): try: idlutils.row_by_value(self.idl, table, column, match) except idlutils.RowNotFound as e: msg = (_("%(match)s does not exist in %(column)s of %(table)s") % {'match': match, 'column': column, 'table': table}) raise RuntimeError(msg) from e class OvsdbConnectionUnavailable(n_exc.ServiceUnavailable): message = _("OVS database connection to %(db_schema)s failed with error: " "'%(error)s'. Verify that the OVS and OVN services are " "available and that the 'ovn_nb_connection' and " "'ovn_sb_connection' configuration options are correct.") class FindLbInTableCommand(command.ReadOnlyCommand): def __init__(self, api, lb, table): super().__init__(api) self.lb = lb self.table = table def run_idl(self, txn): self.result = [ rowview.RowView(item) for item in self.api.tables[self.table].rows.values() if self.lb in item.load_balancer] class GetLrsCommand(command.ReadOnlyCommand): def run_idl(self, txn): self.result = [ rowview.RowView(item) for item in self.api.tables['Logical_Router'].rows.values()] class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): def __init__(self, connection): super().__init__(connection) self.idl._session.reconnect.set_probe_interval( config.get_ovn_ovsdb_probe_interval()) @property def nb_global(self): return next(iter(self.tables['NB_Global'].rows.values())) def create_transaction(self, check_error=False, log_errors=True, bump_nb_cfg=False): return OvnNbTransaction( self, self.ovsdb_connection, self.ovsdb_connection.timeout, check_error, log_errors, bump_nb_cfg=bump_nb_cfg) @contextlib.contextmanager def transaction(self, *args, **kwargs): """A wrapper on the ovsdbapp transaction to work with revisions. This method is just a wrapper around the ovsdbapp transaction to handle revision conflicts correctly. """ try: with super().transaction(*args, **kwargs) as t: yield t except ovn_exc.RevisionConflict as e: LOG.info('Transaction aborted. 
Reason: %s', e) def find_lb_in_table(self, lb, table): return FindLbInTableCommand(self, lb, table) def get_lrs(self): return GetLrsCommand(self) class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend): def __init__(self, connection): super().__init__(connection) self.idl._session.reconnect.set_probe_interval( config.get_ovn_ovsdb_probe_interval()) class OvnNbIdlForLb(ovsdb_monitor.OvnIdl): SCHEMA = "OVN_Northbound" TABLES = ('Logical_Switch', 'Load_Balancer', 'Load_Balancer_Health_Check', 'Logical_Router', 'Logical_Switch_Port', 'Logical_Router_Port', 'Gateway_Chassis', 'NAT') def __init__(self, event_lock_name=None): self.conn_string = config.get_ovn_nb_connection() ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA) helper = self._get_ovsdb_helper(self.conn_string) for table in OvnNbIdlForLb.TABLES: helper.register_table(table) super().__init__( driver=None, remote=self.conn_string, schema=helper) self.event_lock_name = event_lock_name if self.event_lock_name: self.set_lock(self.event_lock_name) atexit.register(self.stop) @tenacity.retry( wait=tenacity.wait_exponential( max=config.get_ovn_ovsdb_retry_max_interval()), reraise=True) def _get_ovsdb_helper(self, connection_string): return idlutils.get_schema_helper(connection_string, self.SCHEMA) def start(self): self.conn = connection.Connection( self, timeout=config.get_ovn_ovsdb_timeout()) return impl_idl_ovn.OvsdbNbOvnIdl(self.conn) def stop(self): # Close the running connection if it has been initialized if hasattr(self, 'conn'): if not self.conn.stop(timeout=config.get_ovn_ovsdb_timeout()): LOG.debug("Connection terminated to OvnNb " "but a thread is still alive") del self.conn # complete the shutdown for the event handler self.notify_handler.shutdown() # Close the idl session self.close() class OvnSbIdlForLb(ovsdb_monitor.OvnIdl): SCHEMA = "OVN_Southbound" TABLES = ('Load_Balancer', 'Service_Monitor') def __init__(self, event_lock_name=None): self.conn_string = config.get_ovn_sb_connection() ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA) helper = self._get_ovsdb_helper(self.conn_string) for table in OvnSbIdlForLb.TABLES: helper.register_table(table) super().__init__( driver=None, remote=self.conn_string, schema=helper) self.event_lock_name = event_lock_name if self.event_lock_name: self.set_lock(self.event_lock_name) atexit.register(self.stop) @tenacity.retry( wait=tenacity.wait_exponential( max=config.get_ovn_ovsdb_retry_max_interval()), reraise=True) def _get_ovsdb_helper(self, connection_string): return idlutils.get_schema_helper(connection_string, self.SCHEMA) def start(self): self.conn = connection.Connection( self, timeout=config.get_ovn_ovsdb_timeout()) return impl_idl_ovn.OvsdbSbOvnIdl(self.conn) def stop(self): # Close the running connection if it has been initialized if hasattr(self, 'conn'): if not self.conn.stop(timeout=config.get_ovn_ovsdb_timeout()): LOG.debug("Connection terminated to OvnSb " "but a thread is still alive") del self.conn # complete the shutdown for the event handler self.notify_handler.shutdown() # Close the idl session self.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/ovsdb/ovsdb_monitor.py0000664000175000017500000000717600000000000026412 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp import event from ovn_octavia_provider.common import config as ovn_config CONF = cfg.CONF LOG = log.getLogger(__name__) class BaseOvnIdl(connection.OvsdbIdl): @classmethod def from_server(cls, connection_string, schema_name): _check_and_set_ssl_files(schema_name) helper = idlutils.get_schema_helper(connection_string, schema_name) helper.register_all() return cls(connection_string, helper) class OvnIdl(BaseOvnIdl): def __init__(self, driver, remote, schema): super().__init__(remote, schema) self.driver = driver self.notify_handler = OvnDbNotifyHandler(driver) # ovsdb lock name to acquire. # This event lock is used to handle the notify events sent by idl.Idl # idl.Idl will call notify function for the "update" rpc method it # receives from the ovsdb-server. # This event lock is required for the following reasons # - If there are multiple neutron servers running, OvnWorkers of # these neutron servers would receive the notify events from # idl.Idl # # - we do not want all the neutron servers to handle these events # # - only the neutron server which has the lock will handle the # notify events. # # - In case the neutron server which owns this lock goes down, # ovsdb server would assign the lock to one of the other neutron # servers. self.event_lock_name = "ovn_provider_driver_event_lock" def notify(self, event, row, updates=None): # Do not handle the notification if the event lock is requested, # but not granted by the ovsdb-server. 
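# (For context, roughly: python-ovs sets is_lock_contended while another # IDL client currently owns the named OVSDB lock, so only the lock # owner - a single provider process at a time - handles each event.)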
if self.is_lock_contended: return row = idlutils.frozen_row(row) self.notify_handler.notify(event, row, updates) @abc.abstractmethod def post_connect(self): """Should be called after the idl has been initialized""" class OvnDbNotifyHandler(event.RowEventHandler): def __init__(self, driver): super().__init__() self.driver = driver def _check_and_set_ssl_files(schema_name): if schema_name == 'OVN_Northbound': priv_key_file = ovn_config.get_ovn_nb_private_key() cert_file = ovn_config.get_ovn_nb_certificate() ca_cert_file = ovn_config.get_ovn_nb_ca_cert() Stream.ssl_set_private_key_file(priv_key_file) Stream.ssl_set_certificate_file(cert_file) Stream.ssl_set_ca_cert_file(ca_cert_file) if schema_name == 'OVN_Southbound': priv_key_file = ovn_config.get_ovn_sb_private_key() cert_file = ovn_config.get_ovn_sb_certificate() ca_cert_file = ovn_config.get_ovn_sb_ca_cert() Stream.ssl_set_private_key_file(priv_key_file) Stream.ssl_set_certificate_file(cert_file) Stream.ssl_set_ca_cert_file(ca_cert_file) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4525883 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/0000775000175000017500000000000000000000000023176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/__init__.py0000664000175000017500000000000000000000000025275 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4525883 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/0000775000175000017500000000000000000000000025340 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/__init__.py0000664000175000017500000000000000000000000027437 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/base.py0000664000175000017500000011764200000000000026637 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from neutron.common import utils as n_utils from neutron_lib.plugins import directory from octavia_lib.api.drivers import data_models as octavia_data_model from octavia_lib.api.drivers import driver_lib from octavia_lib.common import constants as o_constants from oslo_serialization import jsonutils from oslo_utils import uuidutils from ovsdbapp.schema.ovn_northbound import impl_idl as nb_idl_ovn from ovsdbapp.schema.ovn_southbound import impl_idl as sb_idl_ovn # NOTE(mjozefcz): We need base neutron functionals because we need # mechanism driver and l3 plugin. 
from neutron.tests.functional import base from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import driver as ovn_driver class TestOvnOctaviaBase(base.TestOVNFunctionalBase, base.BaseLoggingTestCase): def setUp(self): super().setUp() nb_idl_ovn.OvnNbApiIdlImpl.ovsdb_connection = None sb_idl_ovn.OvnSbApiIdlImpl.ovsdb_connection = None # TODO(mjozefcz): Use octavia listeners to provide needed # sockets and modify tests in order to verify if fake # listener (status) has received valid value. try: mock.patch.object( driver_lib.DriverLibrary, '_check_for_socket_ready').start() except AttributeError: # Backward compatibility with octavia-lib < 1.3.1 pass self.ovn_driver = ovn_driver.OvnProviderDriver() self.ovn_driver._ovn_helper._octavia_driver_lib = mock.MagicMock() self._o_driver_lib = self.ovn_driver._ovn_helper._octavia_driver_lib self._o_driver_lib.update_loadbalancer_status = mock.Mock() self.fake_neutron_client = mock.MagicMock() clients.get_neutron_client = mock.MagicMock() clients.get_neutron_client.return_value = self.fake_neutron_client self.fake_neutron_client.show_subnet = self._mock_show_subnet self.fake_neutron_client.list_ports = self._mock_list_ports self.fake_neutron_client.show_port = self._mock_show_port self.fake_neutron_client.delete_port.return_value = True self._local_net_cache = {} self._local_port_cache = {'ports': []} self.core_plugin = directory.get_plugin() def _mock_show_subnet(self, subnet_id): subnet = {} subnet['network_id'] = self._local_net_cache[subnet_id] return {'subnet': subnet} def _mock_list_ports(self, **kwargs): return self._local_port_cache def _mock_show_port(self, port_id): for port in self._local_port_cache['ports']: if port['id'] == port_id: return {'port': port} def _create_provider_network(self): e1 = self._make_network(self.fmt, 'e1', True, arg_list=('router:external', 'provider:network_type', 'provider:physical_network'), **{'router:external': True, 'provider:network_type': 'flat', 'provider:physical_network': 'public'}) res = self._create_subnet(self.fmt, e1['network']['id'], '100.0.0.0/24', gateway_ip='100.0.0.254', allocation_pools=[{'start': '100.0.0.2', 'end': '100.0.0.253'}], enable_dhcp=False) e1_s1 = self.deserialize(self.fmt, res) return e1, e1_s1 def _create_lb_model(self, vip=None, vip_network_id=None, vip_subnet_id=None, vip_port_id=None, admin_state_up=True): lb = octavia_data_model.LoadBalancer() lb.loadbalancer_id = uuidutils.generate_uuid() if vip: lb.vip_address = vip else: lb.vip_address = '10.0.0.4' if vip_network_id: lb.vip_network_id = vip_network_id if vip_subnet_id: lb.vip_subnet_id = vip_subnet_id if vip_port_id: lb.vip_port_id = vip_port_id lb.admin_state_up = admin_state_up return lb def _create_pool_model( self, loadbalancer_id, pool_name, protocol=o_constants.PROTOCOL_TCP, lb_algorithm=o_constants.LB_ALGORITHM_SOURCE_IP_PORT, admin_state_up=True, listener_id=None): m_pool = octavia_data_model.Pool() if protocol: m_pool.protocol = protocol else: m_pool.protocol = o_constants.PROTOCOL_TCP m_pool.name = pool_name m_pool.pool_id = uuidutils.generate_uuid() m_pool.loadbalancer_id = loadbalancer_id m_pool.members = [] m_pool.admin_state_up = admin_state_up m_pool.lb_algorithm = lb_algorithm if listener_id: m_pool.listener_id = listener_id return m_pool def _create_member_model(self, pool_id, subnet_id, address, protocol_port=None, admin_state_up=True): m_member = octavia_data_model.Member() if protocol_port: m_member.protocol_port = 
protocol_port else: m_member.protocol_port = 80 m_member.member_id = uuidutils.generate_uuid() m_member.pool_id = pool_id if subnet_id: m_member.subnet_id = subnet_id m_member.address = address m_member.admin_state_up = admin_state_up return m_member def _create_listener_model(self, loadbalancer_id, pool_id=None, protocol_port=80, protocol=None, admin_state_up=True): m_listener = octavia_data_model.Listener() if protocol: m_listener.protocol = protocol else: m_listener.protocol = o_constants.PROTOCOL_TCP m_listener.listener_id = uuidutils.generate_uuid() m_listener.loadbalancer_id = loadbalancer_id if pool_id: m_listener.default_pool_id = pool_id m_listener.protocol_port = protocol_port m_listener.admin_state_up = admin_state_up return m_listener def _get_loadbalancers(self): lbs = [] for lb in self.nb_api.tables['Load_Balancer'].rows.values(): external_ids = dict(lb.external_ids) # Skip load balancers used by port forwarding plugin if external_ids.get(ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) == ( ovn_const.PORT_FORWARDING_PLUGIN): continue ls_refs = external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) if ls_refs: external_ids[ ovn_const.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads( ls_refs) lb_dict = {'name': lb.name, 'protocol': lb.protocol, 'vips': lb.vips, 'external_ids': external_ids} try: lb_dict['selection_fields'] = lb.selection_fields except AttributeError: pass lbs.append(lb_dict) return lbs def _get_loadbalancer_id(self, lb_name): for lb in self.nb_api.tables['Load_Balancer'].rows.values(): if lb.name == lb_name: return lb.uuid def _validate_loadbalancers(self, expected_lbs): observed_lbs = self._get_loadbalancers() # NOTE (mjozefcz): assertCountEqual works only on the first level # of comparison; if dicts inside dicts are in a different # order it would fail. 
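# For instance (illustrative): assertCountEqual([{'a': [1, 2]}], # [{'a': [2, 1]}]) fails, because the nested dicts are compared with # == and there the order of the inner lists matters.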
self.assertEqual(len(expected_lbs), len(observed_lbs)) for expected_lb in expected_lbs: # search for LB with same name and protocol found = False for observed_lb in observed_lbs: if (observed_lb.get('name') == expected_lb.get('name') and observed_lb.get('protocol') == expected_lb.get('protocol')): self.assertEqual(expected_lb, observed_lb) found = True if not found: raise Exception("Expected LB %s for protocol %s " "not found in observed_lbs" % ( expected_lb.get('name'), expected_lb.get('protocol'))) def _is_lb_associated_to_ls(self, lb_name, ls_name): return self._is_lb_associated_to_tab( 'Logical_Switch', lb_name, ls_name) def _is_lb_associated_to_lr(self, lb_name, lr_name): return self._is_lb_associated_to_tab( 'Logical_Router', lb_name, lr_name) def _is_lb_associated_to_tab(self, table, lb_name, ls_name): lb_uuid = self._get_loadbalancer_id(lb_name) for ls in self.nb_api.tables[table].rows.values(): if ls.name == ls_name: ls_lbs = [lb.uuid for lb in ls.load_balancer] return lb_uuid in ls_lbs return False def _create_router(self, name, gw_info=None): router = {'router': {'name': name, 'admin_state_up': True, 'tenant_id': self._tenant_id}} if gw_info: router['router']['external_gateway_info'] = gw_info router = self.l3_plugin.create_router(self.context, router) return router['id'] def _create_net(self, name, cidr, router_id=None): n1 = self._make_network(self.fmt, name, True) res = self._create_subnet(self.fmt, n1['network']['id'], cidr) subnet = self.deserialize(self.fmt, res)['subnet'] self._local_net_cache[subnet['id']] = n1['network']['id'] port = self._make_port(self.fmt, n1['network']['id']) if router_id: self.l3_plugin.add_router_interface( self.context, router_id, {'subnet_id': subnet['id']}) self._local_port_cache['ports'].append(port['port']) vip_port_address = port['port']['fixed_ips'][0]['ip_address'] return (n1['network']['id'], subnet['id'], vip_port_address, port['port']['id']) def _update_ls_refs(self, lb_data, net_id, add_ref=True): if not net_id.startswith(ovn_const.LR_REF_KEY_HEADER): net_id = ovn_const.LR_REF_KEY_HEADER + '%s' % net_id if add_ref: if net_id not in lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY]: lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1 else: ref_ct = lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] if ref_ct <= 0: del lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] def _wait_for_status(self, expected_statuses, check_call=True): call_count = len(expected_statuses) update_loadbalancer_status = ( self._o_driver_lib.update_loadbalancer_status) n_utils.wait_until_true( lambda: update_loadbalancer_status.call_count == call_count, timeout=10) if check_call: # NOTE(mjozefcz): The updates are sent in parallel and include # dicts with unordered lists inside. So we can't simply use # assert_has_calls here. Sample structure: # {'listeners': [], # 'loadbalancers': [{'id': 'a', 'provisioning_status': 'ACTIVE'}], # 'members': [{'id': 'b', 'provisioning_status': 'DELETED'}, # {'id': 'c', 'provisioning_status': 'DELETED'}], # 'pools': [{'id': 'd', 'operating_status': 'ONLINE', # 'provisioning_status': 'ACTIVE'}]}, updated_statuses = [] for call in update_loadbalancer_status.mock_calls: updated_statuses.append(call[1][0]) calls_found = [] for expected_status in expected_statuses: for updated_status in updated_statuses: # Find status update having equal keys if (sorted(updated_status.keys()) == sorted(expected_status.keys())): val_check = [] # Within this status update, check if all values of # expected keys match. 
for k, v in expected_status.items(): val_check.append( sorted(expected_status[k], key=lambda x: x['id']) == sorted(updated_status[k], key=lambda x: x['id'])) if False in val_check: # At least one value doesn't match. continue calls_found.append(expected_status) break # Validate if we found all expected calls. self.assertCountEqual(expected_statuses, calls_found) def _wait_for_status_and_validate(self, lb_data, expected_status, check_call=True): self._wait_for_status(expected_status, check_call) expected_lbs = self._make_expected_lbs(lb_data) self._validate_loadbalancers(expected_lbs) def _create_load_balancer_and_validate(self, lb_info, admin_state_up=True, only_model=False, create_router=True, multiple_lb=False): self._o_driver_lib.update_loadbalancer_status.reset_mock() lb_data = {} r_id = self._create_router("r1") if create_router else None if r_id: lb_data[ovn_const.LB_EXT_IDS_LR_REF_KEY] = ( ovn_const.LR_REF_KEY_HEADER + r_id) net_info = self._create_net(lb_info['vip_network'], lb_info['cidr'], router_id=r_id) lb_data['vip_net_info'] = net_info lb_data['model'] = self._create_lb_model(vip=net_info[2], vip_network_id=net_info[0], vip_subnet_id=net_info[1], vip_port_id=net_info[3], admin_state_up=admin_state_up) lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = {} lb_data['listeners'] = [] lb_data['pools'] = [] self._update_ls_refs(lb_data, net_info[0]) if only_model: return lb_data self.ovn_driver.loadbalancer_create(lb_data['model']) name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, lb_data['model'].loadbalancer_id) self.driver.update_port( self.context, net_info[3], {'port': {'name': name}}) if lb_data['model'].admin_state_up: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.ONLINE}] } else: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.OFFLINE}] } if not multiple_lb: self._wait_for_status_and_validate(lb_data, [expected_status]) else: l_id = lb_data['model'].loadbalancer_id self._wait_for_status([expected_status]) self.assertIn(l_id, [lb['name'] for lb in self._get_loadbalancers()]) self.assertTrue( self._is_lb_associated_to_ls( lb_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + net_info[0])) return lb_data def _update_load_balancer_and_validate(self, lb_data, admin_state_up=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() if admin_state_up is not None: lb_data['model'].admin_state_up = admin_state_up self.ovn_driver.loadbalancer_update( lb_data['model'], lb_data['model']) if lb_data['model'].admin_state_up: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.ONLINE}] } else: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.OFFLINE}] } self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_load_balancer_and_validate(self, lb_data, cascade=False, multiple_lb=False): self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.loadbalancer_delete(lb_data['model'], cascade) expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}] } if cascade: expected_status['pools'] = [] expected_status['members'] = [] expected_status['listeners'] = [] for pool in lb_data['pools']: 
expected_status['pools'].append({ 'id': pool.pool_id, 'provisioning_status': 'DELETED'}) for member in pool.members: expected_status['members'].append({ "id": member.member_id, "provisioning_status": "DELETED"}) for listener in lb_data['listeners']: expected_status['listeners'].append({ "id": listener.listener_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}) expected_status = { key: value for key, value in expected_status.items() if value} l_id = lb_data['model'].loadbalancer_id lb = lb_data['model'] del lb_data['model'] if not multiple_lb: self._wait_for_status_and_validate(lb_data, [expected_status]) else: self._wait_for_status([expected_status]) self.assertNotIn( l_id, [lbs['name'] for lbs in self._get_loadbalancers()]) vip_net_id = lb_data['vip_net_info'][0] self.assertFalse( self._is_lb_associated_to_ls( lb.loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + vip_net_id)) def _make_expected_lbs(self, lb_data): def _get_lb_field_by_protocol(protocol, field='external_ids'): "Get needed external_ids and pass by reference" lb = [lb for lb in expected_lbs if lb.get('protocol') == [protocol]] return lb[0].get(field) if not lb_data or not lb_data.get('model'): return [] vip_net_info = lb_data['vip_net_info'] external_ids = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: {}, 'neutron:vip': lb_data['model'].vip_address, 'neutron:vip_port_id': vip_net_info[3], 'enabled': str(lb_data['model'].admin_state_up)} # NOTE(mjozefcz): By default we don't set protocol. We don't know if # listener/pool would be TCP, UDP or SCTP, so do not set it. expected_protocols = set() # Let's fetch the list of L4 protocols defined for this LB. for p in lb_data['pools']: expected_protocols.add(p.protocol.lower()) for listener in lb_data['listeners']: expected_protocols.add(listener.protocol.lower()) # If there is no protocol let's add the default - an empty []. expected_protocols = list(expected_protocols) if len(expected_protocols) == 0: expected_protocols.append(None) expected_lbs = [] for protocol in expected_protocols: lb = {'name': lb_data['model'].loadbalancer_id, 'protocol': [protocol] if protocol else [], 'vips': {}, 'external_ids': copy.deepcopy(external_ids)} if self.ovn_driver._ovn_helper._are_selection_fields_supported(): lb['selection_fields'] = ovn_const.LB_SELECTION_FIELDS_MAP[ o_constants.LB_ALGORITHM_SOURCE_IP_PORT] expected_lbs.append(lb) # For every subnet connected to the LB set the ref # counter. for net_id, ref_ct in lb_data[ ovn_const.LB_EXT_IDS_LS_REFS_KEY].items(): for lb in expected_lbs: # If the given LB has no VIP configured on # this network we shouldn't touch it here. if net_id == 'neutron-%s' % lb_data['model'].vip_network_id: lb.get('external_ids')[ ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1 # For every connected router set it here. if lb_data.get(ovn_const.LB_EXT_IDS_LR_REF_KEY): for lb in expected_lbs: lb.get('external_ids')[ ovn_const.LB_EXT_IDS_LR_REF_KEY] = lb_data[ ovn_const.LB_EXT_IDS_LR_REF_KEY] pool_info = {} for p in lb_data.get('pools', []): external_ids = _get_lb_field_by_protocol( p.protocol.lower(), field='external_ids') p_members = "" for m in p.members: if not m.admin_state_up: continue m_info = 'member_' + m.member_id + '_' + m.address m_info += ":" + str(m.protocol_port) m_info += "_" + str(m.subnet_id) if p_members: p_members += "," + m_info else: p_members = m_info # Bump up LS refs counter if needed. if m.subnet_id: # Need to get the network_id. 
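# (Illustrative: with two member fixed_ips on the same network the # counter built below would end up roughly as # {'neutron-<net-uuid>': 2} under ovn_const.LB_EXT_IDS_LS_REFS_KEY.)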
for port in self._local_port_cache['ports']: for fixed_ip in port['fixed_ips']: if fixed_ip['subnet_id'] == m.subnet_id: ex = external_ids[ ovn_const.LB_EXT_IDS_LS_REFS_KEY] act = ex.get( 'neutron-%s' % port['network_id'], 0) ex['neutron-%s' % port['network_id']] = act + 1 break pool_key = 'pool_' + p.pool_id if not p.admin_state_up: pool_key += ':D' external_ids[pool_key] = p_members pool_info[p.pool_id] = p_members for listener in lb_data['listeners']: expected_vips = _get_lb_field_by_protocol( listener.protocol.lower(), field='vips') external_ids = _get_lb_field_by_protocol( listener.protocol.lower(), field='external_ids') listener_k = 'listener_' + str(listener.listener_id) if lb_data['model'].admin_state_up and listener.admin_state_up: vip_k = lb_data['model'].vip_address + ":" + str( listener.protocol_port) if not isinstance(listener.default_pool_id, octavia_data_model.UnsetType) and pool_info[ listener.default_pool_id]: expected_vips[vip_k] = self._extract_member_info( pool_info[listener.default_pool_id]) else: listener_k += ':D' external_ids[listener_k] = str(listener.protocol_port) + ":" if not isinstance(listener.default_pool_id, octavia_data_model.UnsetType): external_ids[listener_k] += 'pool_' + listener.default_pool_id elif lb_data.get('pools', []): external_ids[listener_k] += 'pool_' + lb_data[ 'pools'][0].pool_id return expected_lbs def _extract_member_info(self, member): mem_info = '' if member: for item in member.split(','): mem_info += item.split('_')[2] + "," return mem_info[:-1] def _create_pool_and_validate(self, lb_data, pool_name, protocol=None, listener_id=None): lb_pools = lb_data['pools'] m_pool = self._create_pool_model(lb_data['model'].loadbalancer_id, pool_name, protocol=protocol, listener_id=listener_id) lb_pools.append(m_pool) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.pool_create(m_pool) operating_status = ( o_constants.ONLINE if listener_id else o_constants.OFFLINE) expected_status = { 'pools': [{'id': m_pool.pool_id, 'provisioning_status': 'ACTIVE', 'operating_status': operating_status}], 'loadbalancers': [{'id': m_pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}] } if listener_id: expected_status['listeners'] = [ {'id': listener_id, 'provisioning_status': 'ACTIVE'}] self._wait_for_status_and_validate(lb_data, [expected_status]) expected_lbs = self._make_expected_lbs(lb_data) self._validate_loadbalancers(expected_lbs) def _update_pool_and_validate(self, lb_data, pool_name, admin_state_up=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() m_pool = self._get_pool_from_lb_data(lb_data, pool_name=pool_name) old_admin_state_up = m_pool.admin_state_up operating_status = 'ONLINE' if admin_state_up is not None: m_pool.admin_state_up = admin_state_up if not admin_state_up: operating_status = 'OFFLINE' pool_listeners = self._get_pool_listeners(lb_data, m_pool.pool_id) expected_listener_status = [ {'id': listener.listener_id, 'provisioning_status': 'ACTIVE'} for listener in pool_listeners] self.ovn_driver.pool_update(m_pool, m_pool) expected_status = { 'pools': [{'id': m_pool.pool_id, 'provisioning_status': 'ACTIVE', 'operating_status': operating_status}], 'loadbalancers': [{'id': m_pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': expected_listener_status } if old_admin_state_up != m_pool.admin_state_up: if m_pool.admin_state_up: oper_status = o_constants.ONLINE else: oper_status = o_constants.OFFLINE expected_status['pools'][0]['operating_status'] = oper_status 
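# (Illustrative shape of the status update waited for below: # {'pools': [{'id': '<pool-id>', 'provisioning_status': 'ACTIVE', # 'operating_status': 'OFFLINE'}], # 'loadbalancers': [{'id': '<lb-id>', 'provisioning_status': 'ACTIVE'}], # 'listeners': [...]})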
self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_pool_and_validate(self, lb_data, pool_name, listener_id=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() p = self._get_pool_from_lb_data(lb_data, pool_name=pool_name) self.ovn_driver.pool_delete(p) lb_data['pools'].remove(p) expected_status = [] # When a pool is deleted and it has any members, they are # expected to be deleted as well. for m in p.members: expected_status.append( {'pools': [{"id": p.pool_id, "provisioning_status": o_constants.ACTIVE, "operating_status": o_constants.ONLINE}], 'members': [{"id": m.member_id, "provisioning_status": "DELETED"}], 'loadbalancers': [{"id": p.loadbalancer_id, "provisioning_status": "ACTIVE"}], 'listeners': []}) self._update_ls_refs( lb_data, self._local_net_cache[m.subnet_id], add_ref=False) if p.members: # If the Pool has members, delete all members of the pool. When # the last member is processed, set the Operating status of the # Pool to Offline expected_status[-1]['pools'][0][ 'operating_status'] = o_constants.OFFLINE pool_dict = { 'pools': [{'id': p.pool_id, 'provisioning_status': 'DELETED'}], 'loadbalancers': [{'id': p.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': [] } if listener_id: pool_dict['listeners'] = [{'id': listener_id, 'provisioning_status': 'ACTIVE'}] expected_status.append(pool_dict) self._wait_for_status_and_validate(lb_data, expected_status) def _get_pool_from_lb_data(self, lb_data, pool_id=None, pool_name=None): for p in lb_data['pools']: if pool_id and p.pool_id == pool_id: return p if pool_name and p.name == pool_name: return p def _get_listener_from_lb_data(self, lb_data, protocol, protocol_port): for listener in lb_data['listeners']: if (listener.protocol_port == protocol_port and listener.protocol == protocol): return listener def _get_pool_listeners(self, lb_data, pool_id): listeners = [] for listener in lb_data['listeners']: if listener.default_pool_id == pool_id: listeners.append(listener) return listeners def _create_member_and_validate(self, lb_data, pool_id, subnet_id, network_id, address, expected_subnet=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) pool_status = {'id': pool.pool_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.ONLINE} m_member = self._create_member_model(pool.pool_id, subnet_id, address) # The "expected" member value, which might be different from what # we pass to member_create(), for example, if an expected_subnet # was given. 
if expected_subnet: e_member = copy.deepcopy(m_member) e_member.subnet_id = expected_subnet else: e_member = m_member pool.members.append(e_member) self.ovn_driver.member_create(m_member) self._update_ls_refs(lb_data, network_id) pool_listeners = self._get_pool_listeners(lb_data, pool_id) expected_listener_status = [ {'id': listener.listener_id, 'provisioning_status': 'ACTIVE'} for listener in pool_listeners] expected_status = { 'pools': [pool_status], 'members': [{"id": m_member.member_id, "provisioning_status": "ACTIVE"}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': expected_listener_status } self._wait_for_status_and_validate(lb_data, [expected_status]) def _get_pool_member(self, pool, member_address): for m in pool.members: if m.address == member_address: return m def _update_member_and_validate(self, lb_data, pool_id, member_address): pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) member = self._get_pool_member(pool, member_address) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.member_update(member, member) expected_status = { 'pools': [{'id': pool.pool_id, 'provisioning_status': 'ACTIVE'}], 'members': [{"id": member.member_id, 'provisioning_status': 'ACTIVE'}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': [] } if getattr(member, 'admin_state_up', None): expected_status['members'][0]['operating_status'] = "ONLINE" else: expected_status['members'][0]['operating_status'] = "OFFLINE" self._wait_for_status_and_validate(lb_data, [expected_status]) def _update_members_in_batch_and_validate(self, lb_data, pool_id, members): pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) expected_status = [] self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.member_batch_update(pool_id, members) for member in members: expected_status.append( {'pools': [{'id': pool.pool_id, 'provisioning_status': 'ACTIVE'}], 'members': [{'id': member.member_id, 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': []}) for m in pool.members: found = False for member in members: if member.member_id == m.member_id: found = True break if not found: expected_status.append( {'pools': [{'id': pool.pool_id, 'provisioning_status': 'ACTIVE'}], 'members': [{'id': m.member_id, 'provisioning_status': 'DELETED'}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': []}) # Delete member from lb_data pool.members.remove(m) self._wait_for_status_and_validate(lb_data, expected_status, check_call=False) def _delete_member_and_validate(self, lb_data, pool_id, network_id, member_address): pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) member = self._get_pool_member(pool, member_address) pool.members.remove(member) pool_status = {"id": pool.pool_id, "provisioning_status": o_constants.ACTIVE, "operating_status": o_constants.ONLINE} if not pool.members: pool_status['operating_status'] = o_constants.OFFLINE self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.member_delete(member) expected_status = { 'pools': [pool_status], 'members': [{"id": member.member_id, "provisioning_status": "DELETED"}], 'loadbalancers': [{"id": pool.loadbalancer_id, "provisioning_status": "ACTIVE"}], 'listeners': []} self._update_ls_refs(lb_data, network_id, add_ref=False) 
self._wait_for_status_and_validate(lb_data, [expected_status]) def _create_listener_and_validate(self, lb_data, pool_id=None, protocol_port=80, admin_state_up=True, protocol='TCP'): if pool_id: pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) loadbalancer_id = pool.loadbalancer_id pool_id = pool.pool_id else: loadbalancer_id = lb_data['model'].loadbalancer_id pool_id = None m_listener = self._create_listener_model(loadbalancer_id, pool_id, protocol_port, protocol=protocol, admin_state_up=admin_state_up) lb_data['listeners'].append(m_listener) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.listener_create(m_listener) expected_status = { 'listeners': [{'id': m_listener.listener_id, 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'loadbalancers': [{'id': m_listener.loadbalancer_id, 'provisioning_status': "ACTIVE"}]} self._wait_for_status_and_validate(lb_data, [expected_status]) def _update_listener_and_validate(self, lb_data, protocol_port=80, admin_state_up=None, protocol='TCP'): m_listener = self._get_listener_from_lb_data( lb_data, protocol, protocol_port) self._o_driver_lib.update_loadbalancer_status.reset_mock() old_admin_state_up = m_listener.admin_state_up operating_status = 'ONLINE' if admin_state_up is not None: m_listener.admin_state_up = admin_state_up if not admin_state_up: operating_status = 'OFFLINE' m_listener.protocol = protocol self.ovn_driver.listener_update(m_listener, m_listener) pool_status = [{'id': m_listener.default_pool_id, 'provisioning_status': 'ACTIVE'}] expected_status = { 'listeners': [{'id': m_listener.listener_id, 'provisioning_status': 'ACTIVE', 'operating_status': operating_status}], 'loadbalancers': [{"id": m_listener.loadbalancer_id, "provisioning_status": "ACTIVE"}], 'pools': pool_status} if old_admin_state_up != m_listener.admin_state_up: if m_listener.admin_state_up: oper_status = o_constants.ONLINE else: oper_status = o_constants.OFFLINE expected_status['listeners'][0]['operating_status'] = oper_status self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_listener_and_validate(self, lb_data, protocol='TCP', protocol_port=80): m_listener = self._get_listener_from_lb_data( lb_data, protocol, protocol_port) lb_data['listeners'].remove(m_listener) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.listener_delete(m_listener) expected_status = { 'listeners': [{"id": m_listener.listener_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}], 'loadbalancers': [{"id": m_listener.loadbalancer_id, "provisioning_status": "ACTIVE"}]} self._wait_for_status_and_validate(lb_data, [expected_status]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/requirements.txt0000664000175000017500000000040200000000000030620 0ustar00zuulzuul00000000000000# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/test_agent.py0000664000175000017500000002357100000000000030057 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import atexit
import multiprocessing as mp

from neutron.common import utils as n_utils

from ovn_octavia_provider import agent as ovn_agent
from ovn_octavia_provider.common import constants as ovn_const
from ovn_octavia_provider import event as ovn_event
from ovn_octavia_provider import helper as ovn_helper
from ovn_octavia_provider.ovsdb import impl_idl_ovn
from ovn_octavia_provider.tests.functional import base as ovn_base


class TestOvnOctaviaProviderAgent(ovn_base.TestOvnOctaviaBase):
    def setUp(self):
        super().setUp()
        self._initialize_ovn_da()

    def _initialize_ovn_da(self):
        # NOTE(mjozefcz): In theory this is a separate process with its
        # own IDL running, but to make it easier for now we can
        # initialize this IDL here instead of spawning another process.
        da_helper = ovn_helper.OvnProviderHelper()
        events = [ovn_event.LogicalRouterPortEvent(da_helper),
                  ovn_event.LogicalSwitchPortUpdateEvent(da_helper)]
        ovn_nb_idl_for_events = impl_idl_ovn.OvnNbIdlForLb(
            event_lock_name='func_test')
        ovn_nb_idl_for_events.notify_handler.watch_events(events)
        ovn_nb_idl_for_events.start()
        atexit.register(da_helper.shutdown)

    def _test_lrp_event_handler(self, cascade=False):
        # Create Network N1 on router R1 and LBA on N1
        lba_data = self._create_load_balancer_and_validate(
            {'vip_network': 'N1', 'cidr': '10.0.0.0/24'})
        router_id = lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
            len(ovn_const.LR_REF_KEY_HEADER):]

        # Create Network N2, connect it to R1
        nw_info = self._create_net("N2", "10.0.1.0/24", router_id)

        # Check if LBA exists in N2 LS
        n_utils.wait_until_true(
            lambda: self._is_lb_associated_to_ls(
                lba_data['model'].loadbalancer_id,
                ovn_const.LR_REF_KEY_HEADER + nw_info[0]),
            timeout=10)

        # Create Network N3
        lbb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'N3', 'cidr': '10.0.2.0/24'},
            create_router=False, multiple_lb=True)

        # Add N3 to R1
        self.l3_plugin.add_router_interface(
            self.context,
            lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
                len(ovn_const.LR_REF_KEY_HEADER):],
            {'subnet_id': lbb_data['vip_net_info'][1]})

        # Check LBB exists on R1
        n_utils.wait_until_true(
            lambda: self._is_lb_associated_to_lr(
                lbb_data['model'].loadbalancer_id,
                lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]),
            timeout=10)

        # Check LBA connected to N3
        n_utils.wait_until_true(
            lambda: self._is_lb_associated_to_ls(
                lba_data['model'].loadbalancer_id,
                ovn_const.LR_REF_KEY_HEADER + lbb_data['vip_net_info'][0]),
            timeout=10)

        # Check LBB connected to N1
        n_utils.wait_until_true(
            lambda: self._is_lb_associated_to_ls(
                lbb_data['model'].loadbalancer_id,
                ovn_const.LR_REF_KEY_HEADER + lba_data['vip_net_info'][0]),
            timeout=10)

        # Check LBB connected to N2
        n_utils.wait_until_true(
            lambda: self._is_lb_associated_to_ls(
                lbb_data['model'].loadbalancer_id,
                ovn_const.LR_REF_KEY_HEADER + nw_info[0]),
            timeout=10)

        lbb_id = lbb_data['model'].loadbalancer_id
        if not cascade:
            # N3 removed from R1
            self.l3_plugin.remove_router_interface(
                self.context,
                lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
                    len(ovn_const.LR_REF_KEY_HEADER):],
                {'subnet_id': lbb_data['vip_net_info'][1]})
        else:
            # Delete LBB with cascade
            self._delete_load_balancer_and_validate(lbb_data, cascade=True,
                                                    multiple_lb=True)

        # Check LBB doesn't exist on R1
        n_utils.wait_until_true(
            lambda: not self._is_lb_associated_to_lr(
                lbb_id, lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]),
            timeout=10)

        # Check LBB not connected to N1
        n_utils.wait_until_true(
            lambda: not self._is_lb_associated_to_ls(
                lbb_id,
                ovn_const.LR_REF_KEY_HEADER + lba_data['vip_net_info'][0]),
            timeout=10)

        # Check LBB not connected to N2
        n_utils.wait_until_true(
            lambda: not self._is_lb_associated_to_ls(
                lbb_id, ovn_const.LR_REF_KEY_HEADER + nw_info[0]),
            timeout=10)

    def test_lrp_event_handler_with_interface_delete(self):
        self._test_lrp_event_handler()

    def test_lrp_event_handler_with_loadbalancer_cascade_delete(self):
        self._test_lrp_event_handler(cascade=True)

    def test_lrp_event_handler_lrp_with_external_gateway(self):
        # Create Network N1 on router R1 and LBA on N1
        lba_data = self._create_load_balancer_and_validate(
            {'vip_network': 'N1', 'cidr': '10.0.0.0/24'})
        router_id = lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
            len(ovn_const.LR_REF_KEY_HEADER):]

        # Create provider network N2, connect it to R1
        provider_net, provider_subnet = self._create_provider_network()
        self.l3_plugin.update_router(
            self.context, router_id,
            {'router': {
                'id': router_id,
                'external_gateway_info': {
                    'enable_snat': True,
                    'network_id': provider_net['network']['id'],
                    'external_fixed_ips': [
                        {'ip_address': '100.0.0.2',
                         'subnet_id': provider_subnet['subnet']['id']}]}}})

        # Check that LBA doesn't exist in the provider network LS
        n_utils.wait_until_true(
            lambda: not self._is_lb_associated_to_ls(
                lba_data['model'].loadbalancer_id,
                ovn_const.LR_REF_KEY_HEADER + provider_net['network']['id']),
            timeout=10)

    def test_fip_on_lb_vip(self):
        """This test checks if FIP on LB VIP is configured.

        This test validates that the Load_Balancer VIP field contains
        the Floating IP address that is configured on the LB VIP port.
        """
        # Create LB
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        # Create a pool
        self._create_pool_and_validate(lb_data, "p1")
        pool_id = lb_data['pools'][0].pool_id
        # Create listener
        self._create_listener_and_validate(lb_data, pool_id, 80)
        # Create Member-1 and associate it with lb_data
        self._create_member_and_validate(
            lb_data, pool_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.10')
        # Create provider network.
        e1, e1_s1 = self._create_provider_network()
        # Configure external_gateway for router
        router_id = lb_data['lr_ref'][8::]
        self.l3_plugin.update_router(
            self.context, router_id,
            {'router': {
                'id': router_id,
                'external_gateway_info': {
                    'enable_snat': True,
                    'network_id': e1['network']['id'],
                    'external_fixed_ips': [
                        {'ip_address': '100.0.0.2',
                         'subnet_id': e1_s1['subnet']['id']}]}}})

        # Create floating IP on LB VIP port
        vip_port_id = lb_data['model'].vip_port_id
        vip_port = self.core_plugin.get_ports(
            self.context, filters={'id': [vip_port_id]})[0]
        self.l3_plugin.create_floatingip(
            self.context,
            {'floatingip': {
                'tenant_id': self._tenant_id,
                'floating_network_id': e1['network']['id'],
                'subnet_id': None,
                'floating_ip_address': '100.0.0.20',
                'port_id': vip_port['id']}})

        # Validate that the FIP is stored as a VIP in the LB
        lbs = self._get_loadbalancers()
        expected_vips = {
            '%s:80' % vip_port['fixed_ips'][0]['ip_address']: '10.0.0.10:80',
            '100.0.0.20:80': '10.0.0.10:80'}
        self.assertDictEqual(expected_vips, lbs[0].get('vips'))

        provider_net = 'neutron-%s' % e1['network']['id']
        tenant_net = 'neutron-%s' % lb_data['model'].vip_network_id
        for ls in self.nb_api.tables['Logical_Switch'].rows.values():
            if ls.name == tenant_net:
                # Make sure that LB1 is added to tenant network
                self.assertIn(
                    lb_data['model'].loadbalancer_id,
                    [lb.name for lb in ls.load_balancer])
            elif ls.name == provider_net:
                # Make sure that LB1 is not added to provider net - e1 LS
                self.assertListEqual([], ls.load_balancer)

    def test_agent_exit(self):
        exit_event = mp.Event()
        agent = mp.Process(target=ovn_agent.OvnProviderAgent,
                           args=[exit_event])
        agent.start()
        self.assertTrue(agent.is_alive())
        exit_event.set()
        agent.join()
        self.assertFalse(agent.is_alive())

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/test_driver.py
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
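
# NOTE: Illustrative sketch, not part of the original archive. The driver
# tests below all follow one pattern: invoke an ovn_driver operation, then
# wait until the mocked Octavia driver library receives status dicts of
# roughly this shape (names taken from the helpers in base.py above):
#
#     expected_status = {
#         'loadbalancers': [{'id': lb_id, 'provisioning_status': 'ACTIVE'}],
#         'listeners': [],
#         'pools': [],
#         'members': [],
#     }
#     self._wait_for_status_and_validate(lb_data, [expected_status])
#
# Each *_and_validate helper builds one or more such dicts and asserts that
# update_loadbalancer_status() was eventually called with them; lb_id here
# stands in for the real loadbalancer_id of the model under test.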
from octavia_lib.api.drivers import exceptions as o_exceptions
from octavia_lib.common import constants as o_constants

from ovn_octavia_provider.tests.functional import base as ovn_base


class TestOvnOctaviaProviderDriver(ovn_base.TestOvnOctaviaBase):

    def test_loadbalancer(self):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        self._update_load_balancer_and_validate(lb_data,
                                                admin_state_up=False)
        self._update_load_balancer_and_validate(lb_data,
                                                admin_state_up=True)
        self._delete_load_balancer_and_validate(lb_data)
        # Create a load balancer with admin state down
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'},
            admin_state_up=False)
        self._delete_load_balancer_and_validate(lb_data)

    def test_delete_lb_on_nonexisting_lb(self):
        # The LoadBalancer doesn't exist anymore, so just create a model
        # and delete it
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '19.0.0.0/24'},
            only_model=True)
        self.ovn_driver.loadbalancer_delete(lb_data['model'])
        expected_status = {
            'loadbalancers': [{"id": lb_data['model'].loadbalancer_id,
                               "provisioning_status": "DELETED",
                               "operating_status": "OFFLINE"}],
            'listeners': [],
            'pools': [],
            'members': [],
        }
        del lb_data['model']
        self._wait_for_status_and_validate(lb_data, [expected_status])

    def test_pool(self):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        self._create_pool_and_validate(lb_data, "p_TCP_1", protocol='TCP')
        self._update_pool_and_validate(lb_data, "p_TCP_1")
        self._create_pool_and_validate(lb_data, "p_UDP_1", protocol='UDP')
        self._create_pool_and_validate(lb_data, "p_SCTP_1", protocol='SCTP')
        self._create_pool_and_validate(lb_data, "p_TCP_2", protocol='TCP')
        self._update_pool_and_validate(lb_data, "p_TCP_2",
                                       admin_state_up=False)
        self._update_pool_and_validate(lb_data, "p_TCP_2",
                                       admin_state_up=True)
        self._update_pool_and_validate(lb_data, "p_TCP_2",
                                       admin_state_up=False)
        self._create_pool_and_validate(lb_data, "p_UDP_2", protocol='UDP')
        self._create_pool_and_validate(lb_data, "p_SCTP_2", protocol='SCTP')
        self._delete_pool_and_validate(lb_data, "p_SCTP_1")
        self._delete_pool_and_validate(lb_data, "p_UDP_1")
        self._delete_pool_and_validate(lb_data, "p_TCP_1")
        self._delete_load_balancer_and_validate(lb_data)

    def test_member(self):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})

        # TCP Pool
        self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP')

        # UDP Pool
        self._create_pool_and_validate(lb_data, "p_UDP", protocol='UDP')

        # SCTP Pool
        self._create_pool_and_validate(lb_data, "p_SCTP", protocol='SCTP')

        pool_TCP_id = lb_data['pools'][0].pool_id
        pool_UDP_id = lb_data['pools'][1].pool_id
        pool_SCTP_id = lb_data['pools'][2].pool_id

        # Members for TCP Pool
        self._create_member_and_validate(
            lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.10')
        self._update_member_and_validate(lb_data, pool_TCP_id, "10.0.0.10")
        self._create_member_and_validate(
            lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.11')

        # Members for UDP Pool
        self._create_member_and_validate(
            lb_data, pool_UDP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.10')
        self._update_member_and_validate(lb_data, pool_UDP_id, "10.0.0.10")
        self._create_member_and_validate(
            lb_data, pool_UDP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.11')

        # Members for SCTP Pool
        self._create_member_and_validate(
            lb_data, pool_SCTP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.10')
        self._update_member_and_validate(lb_data, pool_SCTP_id, "10.0.0.10")
        self._create_member_and_validate(
            lb_data, pool_SCTP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.11')

        # Disable loadbalancer
        self._update_load_balancer_and_validate(lb_data,
                                                admin_state_up=False)
        # Enable loadbalancer back
        self._update_load_balancer_and_validate(lb_data,
                                                admin_state_up=True)

        # Delete members from TCP Pool
        self._delete_member_and_validate(lb_data, pool_TCP_id,
                                         lb_data['vip_net_info'][0],
                                         '10.0.0.10')
        self._delete_member_and_validate(lb_data, pool_TCP_id,
                                         lb_data['vip_net_info'][0],
                                         '10.0.0.11')
        # Add a member to the TCP Pool again
        self._create_member_and_validate(
            lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.10')

        # Create new networks and add members to the TCP pool from them.
        net20_info = self._create_net('net20', '20.0.0.0/24')
        net20 = net20_info[0]
        subnet20 = net20_info[1]
        self._create_member_and_validate(lb_data, pool_TCP_id, subnet20,
                                         net20, '20.0.0.4')
        self._create_member_and_validate(lb_data, pool_TCP_id, subnet20,
                                         net20, '20.0.0.6')
        net30_info = self._create_net('net30', '30.0.0.0/24')
        net30 = net30_info[0]
        subnet30 = net30_info[1]
        self._create_member_and_validate(lb_data, pool_TCP_id, subnet30,
                                         net30, '30.0.0.6')
        self._delete_member_and_validate(lb_data, pool_TCP_id, net20,
                                         '20.0.0.6')

        # Deleting the pool should also delete the members.
        self._delete_pool_and_validate(lb_data, "p_TCP")
        # Delete the whole LB.
        self._delete_load_balancer_and_validate(lb_data)

    def test_member_no_subnet(self):
        self._o_driver_lib.get_pool.return_value = None

        # Test creating a member without a subnet and an unknown pool
        m_member = self._create_member_model('pool_from_nowhere',
                                             None, '30.0.0.7', 80)
        self.assertRaises(o_exceptions.UnsupportedOptionError,
                          self.ovn_driver.member_create, m_member)

        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})

        # TCP Pool
        self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP')
        pool_TCP_id = lb_data['pools'][0].pool_id
        self._o_driver_lib.get_pool.return_value = lb_data['pools'][0]
        self._o_driver_lib.get_loadbalancer.return_value = lb_data['model']

        # Test creating a member without a subnet but with a pool
        self._create_member_and_validate(
            lb_data, pool_TCP_id, None, lb_data['vip_net_info'][0],
            '10.0.0.10', expected_subnet=lb_data['vip_net_info'][1])

        # Deleting the pool should also delete the members.
        self._delete_pool_and_validate(lb_data, "p_TCP")
        # Delete the whole LB.
        self._delete_load_balancer_and_validate(lb_data)

    def test_listener(self):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP')
        self._create_pool_and_validate(lb_data, "p_UDP", protocol='UDP')
        self._create_pool_and_validate(lb_data, "p_SCTP", protocol='SCTP')
        pool_TCP_id = lb_data['pools'][0].pool_id
        pool_UDP_id = lb_data['pools'][1].pool_id
        pool_SCTP_id = lb_data['pools'][2].pool_id
        net_info = self._create_net('net1', '20.0.0.0/24')

        # Create member in TCP pool
        self._create_member_and_validate(
            lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.4')
        self._create_member_and_validate(lb_data, pool_TCP_id,
                                         net_info[1], net_info[0],
                                         '20.0.0.4')

        # Create member in UDP pool
        self._create_member_and_validate(
            lb_data, pool_UDP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.4')
        self._create_member_and_validate(lb_data, pool_UDP_id,
                                         net_info[1], net_info[0],
                                         '20.0.0.4')

        # Create member in SCTP pool
        self._create_member_and_validate(
            lb_data, pool_SCTP_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.4')
        self._create_member_and_validate(lb_data, pool_SCTP_id,
                                         net_info[1], net_info[0],
                                         '20.0.0.4')

        # Play around first listener linked to TCP pool
        self._create_listener_and_validate(
            lb_data, pool_TCP_id, 80, protocol='TCP')
        self._update_listener_and_validate(lb_data, protocol_port=80)
        self._update_listener_and_validate(
            lb_data, protocol_port=80, admin_state_up=True)
        self._update_listener_and_validate(
            lb_data, protocol_port=80, admin_state_up=False)
        self._update_listener_and_validate(
            lb_data, protocol_port=80, admin_state_up=True)
        self._create_listener_and_validate(
            lb_data, pool_TCP_id, protocol_port=82, protocol='TCP')

        # Play around second listener linked to UDP pool
        self._create_listener_and_validate(
            lb_data, pool_UDP_id, 53, protocol='UDP')
        self._update_listener_and_validate(lb_data, 53, protocol='UDP')
        self._update_listener_and_validate(
            lb_data, protocol_port=53, protocol='UDP', admin_state_up=True)
        self._update_listener_and_validate(
            lb_data, protocol_port=53, protocol='UDP', admin_state_up=False)
        self._update_listener_and_validate(
            lb_data, protocol_port=53, protocol='UDP', admin_state_up=True)
        self._create_listener_and_validate(
            lb_data, pool_UDP_id, protocol_port=21, protocol='UDP')

        # Play around third listener linked to SCTP pool
        self._create_listener_and_validate(
            lb_data, pool_SCTP_id, 8081, protocol='SCTP')
        self._update_listener_and_validate(lb_data, 8081, protocol='SCTP')
        self._update_listener_and_validate(
            lb_data, protocol_port=8081, protocol='SCTP',
            admin_state_up=True)
        self._update_listener_and_validate(
            lb_data, protocol_port=8081, protocol='SCTP',
            admin_state_up=False)
        self._update_listener_and_validate(
            lb_data, protocol_port=8081, protocol='SCTP',
            admin_state_up=True)
        self._create_listener_and_validate(
            lb_data, pool_SCTP_id, protocol_port=8082, protocol='SCTP')

        # Delete listeners linked to TCP pool
        self._delete_listener_and_validate(
            lb_data, protocol_port=82, protocol='TCP')
        self._delete_listener_and_validate(
            lb_data, protocol_port=80, protocol='TCP')
        # Delete TCP pool members
        self._delete_member_and_validate(lb_data, pool_TCP_id,
                                         net_info[0], '20.0.0.4')
        self._delete_member_and_validate(lb_data, pool_TCP_id,
                                         lb_data['vip_net_info'][0],
                                         '10.0.0.4')
        # Delete the now-empty TCP pool
        self._delete_pool_and_validate(lb_data, "p_TCP")
        # Delete the rest
        self._delete_load_balancer_and_validate(lb_data)
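
    # Illustrative sketch (hypothetical test, not in the original suite):
    # a minimal listener lifecycle composed from the base helpers would
    # look like this:
    #
    #     def test_listener_minimal(self):
    #         lb_data = self._create_load_balancer_and_validate(
    #             {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
    #         self._create_listener_and_validate(lb_data, protocol_port=80)
    #         self._update_listener_and_validate(lb_data, protocol_port=80,
    #                                            admin_state_up=False)
    #         self._delete_listener_and_validate(lb_data, protocol_port=80)
    #         self._delete_load_balancer_and_validate(lb_data)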
    def _test_cascade_delete(self, pool=True, listener=True, member=True):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        if pool:
            self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP')
            self._create_pool_and_validate(lb_data, "p_UDP", protocol='UDP')
            self._create_pool_and_validate(lb_data, "p_SCTP",
                                           protocol='SCTP')
            pool_TCP_id = lb_data['pools'][0].pool_id
            pool_UDP_id = lb_data['pools'][1].pool_id
            pool_SCTP_id = lb_data['pools'][2].pool_id
            if member:
                self._create_member_and_validate(
                    lb_data, pool_TCP_id, lb_data['vip_net_info'][1],
                    lb_data['vip_net_info'][0], '10.0.0.10')
                self._create_member_and_validate(
                    lb_data, pool_UDP_id, lb_data['vip_net_info'][1],
                    lb_data['vip_net_info'][0], '10.0.0.10')
                self._create_member_and_validate(
                    lb_data, pool_SCTP_id, lb_data['vip_net_info'][1],
                    lb_data['vip_net_info'][0], '10.0.0.10')
            if listener:
                self._create_listener_and_validate(
                    lb_data, pool_TCP_id, protocol_port=80, protocol='TCP')
                self._create_listener_and_validate(
                    lb_data, pool_UDP_id, protocol_port=53, protocol='UDP')
                self._create_listener_and_validate(
                    lb_data, pool_SCTP_id, protocol_port=8081,
                    protocol='SCTP')

        self._delete_load_balancer_and_validate(lb_data, cascade=True)

    def test_lb_listener_pools_cascade(self):
        self._test_cascade_delete(member=False)

    def test_lb_pool_cascade(self):
        self._test_cascade_delete(member=False, listener=False)

    def test_cascade_delete(self):
        self._test_cascade_delete()

    def test_for_unsupported_options(self):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})

        m_pool = self._create_pool_model(lb_data['model'].loadbalancer_id,
                                         'lb1')
        m_pool.protocol = o_constants.PROTOCOL_HTTP
        self.assertRaises(o_exceptions.UnsupportedOptionError,
                          self.ovn_driver.pool_create, m_pool)

        self.assertRaises(o_exceptions.UnsupportedOptionError,
                          self.ovn_driver.loadbalancer_failover,
                          lb_data['model'].loadbalancer_id)

        m_listener = self._create_listener_model(
            lb_data['model'].loadbalancer_id, m_pool.pool_id, 80)
        m_listener.protocol = o_constants.PROTOCOL_HTTP
        self.assertRaises(o_exceptions.UnsupportedOptionError,
                          self.ovn_driver.listener_create, m_listener)
        self._create_listener_and_validate(lb_data)
        self._delete_load_balancer_and_validate(lb_data)

    def test_lb_listener_pool_workflow(self):
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        self._create_listener_and_validate(lb_data)
        self._create_pool_and_validate(
            lb_data, "p1", listener_id=lb_data['listeners'][0].listener_id)
        self._delete_pool_and_validate(
            lb_data, "p1", listener_id=lb_data['listeners'][0].listener_id)
        self._delete_listener_and_validate(lb_data)
        self._delete_load_balancer_and_validate(lb_data)

    def test_lb_member_batch_update(self):
        # Create a LoadBalancer
        lb_data = self._create_load_balancer_and_validate(
            {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        # Create a pool
        self._create_pool_and_validate(lb_data, "p1")
        pool_id = lb_data['pools'][0].pool_id
        # Create Member-1 and associate it with lb_data
        self._create_member_and_validate(
            lb_data, pool_id, lb_data['vip_net_info'][1],
            lb_data['vip_net_info'][0], '10.0.0.10')
        # Create Member-2
        m_member = self._create_member_model(pool_id,
                                             lb_data['vip_net_info'][1],
                                             '10.0.0.12')
        # Update OVN's Logical Switch reference
        self._update_ls_refs(lb_data, lb_data['vip_net_info'][0])
        lb_data['pools'][0].members.append(m_member)
        # Add a new member to the LB
        members = [m_member] + [lb_data['pools'][0].members[0]]
        self._update_members_in_batch_and_validate(lb_data, pool_id,
                                                   members)
        # Delete one member, while keeping the other member available
        self._update_members_in_batch_and_validate(lb_data, pool_id,
                                                   [m_member])
        self._delete_load_balancer_and_validate(lb_data)

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/functional/test_integration.py
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ovn_octavia_provider.common import constants as ovn_const
from ovn_octavia_provider.common import utils
from ovn_octavia_provider.tests.functional import base as ovn_base

from neutron_lib.api.definitions import floating_ip_port_forwarding as pf_def
from neutron_lib.utils import runtime
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils

LOG = logging.getLogger(__name__)


class TestOvnOctaviaProviderIntegration(ovn_base.TestOvnOctaviaBase):
    def setUp(self):
        super().setUp()
        # Add port_forwarding as a configured service plugin (if needed)
        svc_plugins = set(cfg.CONF.service_plugins)
        svc_plugins.add("port_forwarding")
        cfg.CONF.set_override("service_plugins", list(svc_plugins))
        if not self.pf_plugin:
            # OVN does not use RPC: disable it for port-forwarding tests
            self.pf_plugin = self._load_port_forwarding_class()
            self.pf_plugin._rpc_notifications_required = False
        self.assertIsNotNone(self.pf_plugin,
                             "TestOVNFunctionalBase is expected to have "
                             "port forwarding plugin configured")

    @staticmethod
    def _load_port_forwarding_class():
        """Load port forwarding plugin

        :returns: instance of plugin that is loaded
        :raises ImportError: if it fails to load the plugin
        """
        try:
            loaded_class = runtime.load_class_by_alias_or_classname(
                'neutron.service_plugins', 'port_forwarding')
            return loaded_class()
        except ImportError:
            with excutils.save_and_reraise_exception():
                LOG.error("Error loading port_forwarding plugin")

    def _find_pf_lb(self, router_id, fip_id=None):
        result = []
        for ovn_lb in self.nb_api.get_router_floatingip_lbs(
                utils.ovn_name(router_id)):
            ext_ids = ovn_lb.external_ids
            if not fip_id or fip_id == ext_ids[
                    ovn_const.OVN_FIP_EXT_ID_KEY]:
                result.append(ovn_lb)
        return result or None

    def _loadbalancer_operation(self, lb_data=None, update=False,
                                delete=False):
        if not lb_data:
            lb_data = self._create_load_balancer_and_validate(
                {'vip_network': 'vip_network', 'cidr': '10.0.0.0/24'})
        if update:
            self._update_load_balancer_and_validate(lb_data,
                                                    admin_state_up=False)
            self._update_load_balancer_and_validate(lb_data,
                                                    admin_state_up=True)
        if delete:
            self._delete_load_balancer_and_validate(lb_data)
        return None if delete else lb_data

    def _validate_from_lb_data(self, lb_data):
        expected_lbs = self._make_expected_lbs(lb_data)
        self._validate_loadbalancers(expected_lbs)

    def test_port_forwarding(self):
        def _verify_pf_lb(test, protocol, vip_ext_port, vip_int_port):
            ovn_lbs = test._find_pf_lb(router_id, fip_id)
            test.assertEqual(len(ovn_lbs), 1)
            test.assertEqual(
                ovn_lbs[0].name,
                'pf-floatingip-{}-{}'.format(fip_id, protocol))
            self.assertEqual(
                ovn_lbs[0].vips,
                {'{}:{}'.format(fip_ip, vip_ext_port):
                 '{}:{}'.format(p1_ip, vip_int_port)})

        n1, s1 = self._create_provider_network()
        ext_net = n1['network']
        ext_subnet = s1['subnet']
        gw_info = {
            'enable_snat': True,
            'network_id': ext_net['id'],
            'external_fixed_ips': [
                {'ip_address': '100.0.0.2',
                 'subnet_id': ext_subnet['id']}]}
        router_id = self._create_router('routertest', gw_info=gw_info)

        # Create Network N2, connect it to router
        n2_id, sub2_id, p1_ip, p1_id = self._create_net(
            "N2", "10.0.1.0/24", router_id)

        fip_info = {'floatingip': {
            'tenant_id': self._tenant_id,
            'floating_network_id': ext_net['id'],
            'port_id': None,
            'fixed_ip_address': None}}
        fip = self.l3_plugin.create_floatingip(self.context, fip_info)
        fip_id = fip['id']
        fip_ip = fip['floating_ip_address']

        # Create floating IP port forwarding. This will create an
        # OVN load balancer
        fip_pf_args = {
            pf_def.EXTERNAL_PORT: 2222,
            pf_def.INTERNAL_PORT: 22,
            pf_def.INTERNAL_PORT_ID: p1_id,
            pf_def.PROTOCOL: 'tcp',
            pf_def.INTERNAL_IP_ADDRESS: p1_ip}
        fip_attrs = {pf_def.RESOURCE_NAME: {pf_def.RESOURCE_NAME:
                                            fip_pf_args}}
        pf_obj = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, fip_id, **fip_attrs)

        # Check pf_lb with no octavia_provider_lb
        _verify_pf_lb(self, 'tcp', 2222, 22)

        # Create octavia_provider_lb
        lb_data = self._loadbalancer_operation()
        expected_lbs = self._make_expected_lbs(lb_data)
        _verify_pf_lb(self, 'tcp', 2222, 22)

        fip_pf_args2 = {pf_def.EXTERNAL_PORT: 5353,
                        pf_def.INTERNAL_PORT: 53,
                        pf_def.PROTOCOL: 'udp'}
        fip_attrs2 = {pf_def.RESOURCE_NAME: {pf_def.RESOURCE_NAME:
                                             fip_pf_args2}}
        self.pf_plugin.update_floatingip_port_forwarding(
            self.context, pf_obj['id'], fip_id, **fip_attrs2)

        # Make sure octavia_provider_lb is not disturbed
        self._validate_loadbalancers(expected_lbs)

        # Update octavia_provider_lb
        self._loadbalancer_operation(lb_data, update=True)
        _verify_pf_lb(self, 'udp', 5353, 53)

        # Delete octavia_provider_lb
        self._loadbalancer_operation(lb_data, delete=True)
        _verify_pf_lb(self, 'udp', 5353, 53)

        # Delete pf_lb after creating octavia_provider_lb
        lb_data = self._loadbalancer_operation()
        expected_lbs = self._make_expected_lbs(lb_data)
        self.pf_plugin.delete_floatingip_port_forwarding(
            self.context, pf_obj['id'], fip_id)
        self._loadbalancer_operation(lb_data, update=True)
        self.assertIsNone(self._find_pf_lb(router_id, fip_id))

        # Make sure octavia_provider_lb is not disturbed
        self._validate_loadbalancers(expected_lbs)
        self._loadbalancer_operation(lb_data, delete=True)

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/__init__.py

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/base.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from unittest import mock

from neutron.tests import base
from octavia_lib.api.drivers import driver_lib
from oslo_utils import uuidutils


class TestOvnOctaviaBase(base.BaseTestCase):

    def setUp(self):
        super().setUp()
        self.listener_id = uuidutils.generate_uuid()
        self.loadbalancer_id = uuidutils.generate_uuid()
        self.pool_id = uuidutils.generate_uuid()
        self.member_id = uuidutils.generate_uuid()
        self.member_subnet_id = uuidutils.generate_uuid()
        self.member_port = '1010'
        self.member_pool_id = self.pool_id
        self.member_address = '192.168.2.149'
        self.port1_id = uuidutils.generate_uuid()
        self.port2_id = uuidutils.generate_uuid()
        self.project_id = uuidutils.generate_uuid()
        self.vip_network_id = uuidutils.generate_uuid()
        self.vip_port_id = uuidutils.generate_uuid()
        self.vip_subnet_id = uuidutils.generate_uuid()
        self.healthmonitor_id = uuidutils.generate_uuid()
        ovn_nb_idl = mock.patch(
            'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvnNbIdlForLb')
        self.mock_ovn_nb_idl = ovn_nb_idl.start()
        ovn_sb_idl = mock.patch(
            'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvnSbIdlForLb')
        self.mock_ovn_sb_idl = ovn_sb_idl.start()
        self.member_address = '192.168.2.149'
        self.vip_address = '192.148.210.109'
        self.vip_dict = {'vip_network_id': uuidutils.generate_uuid(),
                         'vip_subnet_id': uuidutils.generate_uuid()}
        self.vip_output = {'vip_network_id': self.vip_dict['vip_network_id'],
                           'vip_subnet_id': self.vip_dict['vip_subnet_id']}
        mock.patch(
            'ovsdbapp.backend.ovs_idl.idlutils.get_schema_helper').start()
        mock.patch.object(
            driver_lib.DriverLibrary, '_check_for_socket_ready').start()

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/common/__init__.py

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/common/test_clients.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
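
# Illustrative note (an assumption for readers, not original code): the
# tests below exercise two caching behaviours of
# ovn_octavia_provider.common.clients. KeystoneSession memoizes its
# lazily-built .session and .auth, and NeutronAuth behaves as a Singleton,
# so constructing it twice with the same arguments yields the same cached
# instance:
#
#     c1 = clients.NeutronAuth(**client_args)
#     c2 = clients.NeutronAuth(**client_args)
#     assert c1 is c2    # served from clients.Singleton._instances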
#
from unittest import mock

from oslotest import base

from ovn_octavia_provider.common import clients


class TestKeystoneSession(base.BaseTestCase):

    @mock.patch(
        'keystoneauth1.loading.register_auth_conf_options')
    @mock.patch(
        'keystoneauth1.loading.register_session_conf_options')
    def test_init(self, kl_rs, kl_ra):
        clients.KeystoneSession()
        kl_ra.assert_called_once_with(mock.ANY, 'service_auth')
        kl_rs.assert_called_once_with(mock.ANY, 'service_auth')

    @mock.patch(
        'keystoneauth1.loading.load_session_from_conf_options')
    def test_cached_session(self, kl):
        ksession = clients.KeystoneSession()
        self.assertIs(
            ksession.session,
            ksession.session)
        kl.assert_called_once_with(
            mock.ANY, 'service_auth', auth=ksession.auth)

    @mock.patch(
        'keystoneauth1.loading.load_auth_from_conf_options')
    def test_cached_auth(self, kl):
        ksession = clients.KeystoneSession()
        self.assertIs(
            ksession.auth,
            ksession.auth)
        kl.assert_called_once_with(mock.ANY, 'service_auth')


class TestNeutronAuth(base.BaseTestCase):

    def setUp(self):
        super().setUp()
        self.mock_client = mock.patch(
            'neutronclient.neutron.client.Client').start()
        self.client_args = {
            'endpoint': 'foo_endpoint',
            'region': 'foo_region',
            'endpoint_type': 'foo_endpoint_type',
            'service_name': 'foo_service_name',
            'insecure': 'foo_insecure',
            'ca_cert': 'foo_ca_cert'}
        clients.Singleton._instances = {}

    @mock.patch.object(clients, 'KeystoneSession')
    def test_init(self, mock_ks):
        clients.NeutronAuth(**self.client_args)
        self.mock_client.assert_called_once_with(
            '2.0',
            endpoint_override=self.client_args['endpoint'],
            region_name=self.client_args['region'],
            endpoint_type=self.client_args['endpoint_type'],
            service_name=self.client_args['service_name'],
            insecure=self.client_args['insecure'],
            ca_cert=self.client_args['ca_cert'],
            session=mock_ks().session)

    def test_singleton(self):
        c1 = clients.NeutronAuth(**self.client_args)
        c2 = clients.NeutronAuth(**self.client_args)
        self.assertIs(c1, c2)

    def test_singleton_exception(self):
        with mock.patch(
                'neutronclient.neutron.client.Client',
                side_effect=[RuntimeError, 'foo', 'foo']) as n_cli:
            self.assertRaises(
                RuntimeError,
                clients.NeutronAuth,
                **self.client_args)
            c2 = clients.NeutronAuth(**self.client_args)
            c3 = clients.NeutronAuth(**self.client_args)
            self.assertIs(c2, c3)
            self.assertEqual(n_cli._mock_call_count, 2)

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/fakes.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import copy
from unittest import mock

from octavia_lib.api.drivers import data_models
from oslo_utils import uuidutils

from ovn_octavia_provider.common import constants
from ovn_octavia_provider.common import utils


class FakeResource(dict):

    def __init__(self, manager=None, info=None, loaded=False, methods=None):
        """Set attributes and methods for a resource.
        :param manager: The resource manager
        :param Dictionary info: A dictionary with all attributes
        :param bool loaded: True if the resource is loaded in memory
        :param Dictionary methods: A dictionary with all methods
        """
        info = info or {}
        super().__init__(info)
        methods = methods or {}

        self.__name__ = type(self).__name__
        self.manager = manager
        self._info = info
        self._add_details(info)
        self._add_methods(methods)
        self._loaded = loaded
        # Add a revision number by default
        setattr(self, 'revision_number', 1)

    @property
    def db_obj(self):
        return self

    def _add_details(self, info):
        for (k, v) in info.items():
            setattr(self, k, v)

    def _add_methods(self, methods):
        """Fake methods with MagicMock objects.

        For each <@key, @value> pair in methods, add a callable MagicMock
        object named @key as an attribute, and set the mock's return_value
        to @value. When users access the attribute with (), @value will be
        returned, which looks like a function call.
        """
        for (name, ret) in methods.items():
            method = mock.MagicMock(return_value=ret)
            setattr(self, name, method)

    def __repr__(self):
        reprkeys = sorted(k for k in self.__dict__.keys()
                          if k[0] != '_' and k != 'manager')
        info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
        return "<%s %s>" % (self.__class__.__name__, info)

    def keys(self):
        return self._info.keys()

    def info(self):
        return self._info

    def update(self, info):
        super().update(info)
        self._add_details(info)


class FakeOvsdbRow(FakeResource):
    """Fake one or more OVSDB rows."""

    @staticmethod
    def create_one_ovsdb_row(attrs=None, methods=None):
        """Create a fake OVSDB row.

        :param Dictionary attrs: A dictionary with all attributes
        :param Dictionary methods: A dictionary with all methods
        :return: A FakeResource object faking the OVSDB row
        """
        attrs = attrs or {}
        methods = methods or {}

        # Set default attributes.
        fake_uuid = uuidutils.generate_uuid()
        ovsdb_row_attrs = {
            'uuid': fake_uuid,
            'name': 'name-' + fake_uuid,
            'external_ids': {},
        }

        # Set default methods.
        ovsdb_row_methods = {
            'addvalue': None,
            'delete': None,
            'delvalue': None,
            'verify': None,
            'setkey': None,
        }

        # Overwrite default attributes and methods.
        ovsdb_row_attrs.update(attrs)
        ovsdb_row_methods.update(methods)

        return FakeResource(info=copy.deepcopy(ovsdb_row_attrs),
                            loaded=True,
                            methods=copy.deepcopy(ovsdb_row_methods))


class FakeSubnet():
    """Fake one or more subnets."""

    @staticmethod
    def create_one_subnet(attrs=None):
        """Create a fake subnet.

        :param Dictionary attrs: A dictionary with all attributes
        :return: A FakeResource object faking the subnet
        """
        attrs = attrs or {}

        # Set default attributes.
        fake_uuid = uuidutils.generate_uuid()
        subnet_attrs = {
            'id': 'subnet-id-' + fake_uuid,
            'name': 'subnet-name-' + fake_uuid,
            'network_id': 'network-id-' + fake_uuid,
            'cidr': '10.10.10.0/24',
            'tenant_id': 'project-id-' + fake_uuid,
            'enable_dhcp': True,
            'dns_nameservers': [],
            'allocation_pools': [],
            'host_routes': [],
            'ip_version': 4,
            'gateway_ip': '10.10.10.1',
            'ipv6_address_mode': 'None',
            'ipv6_ra_mode': 'None',
            'subnetpool_id': None,
        }

        # Overwrite default attributes.
        subnet_attrs.update(attrs)

        return FakeResource(info=copy.deepcopy(subnet_attrs),
                            loaded=True)


class FakeOVNPort():
    """Fake one or more ports."""

    @staticmethod
    def create_one_port(attrs=None):
        """Create a fake ovn port.

        :param Dictionary attrs: A dictionary with all attributes
        :return: A FakeResource object faking the port
        """
        attrs = attrs or {}

        # Set default attributes.
        fake_uuid = uuidutils.generate_uuid()
        port_attrs = {
            'addresses': [],
            'dhcpv4_options': '',
            'dhcpv6_options': [],
            'enabled': True,
            'external_ids': {},
            'name': fake_uuid,
            'options': {},
            'parent_name': [],
            'port_security': [],
            'tag': [],
            'tag_request': [],
            'type': '',
            'up': False,
        }

        # Overwrite default attributes.
        port_attrs.update(attrs)

        return type('Logical_Switch_Port', (object, ), port_attrs)

    @staticmethod
    def from_neutron_port(port):
        """Create a fake ovn port based on a neutron port."""
        external_ids = {
            constants.OVN_NETWORK_NAME_EXT_ID_KEY:
                utils.ovn_name(port['network_id']),
            constants.OVN_SG_IDS_EXT_ID_KEY:
                ' '.join(port['security_groups']),
            constants.OVN_DEVICE_OWNER_EXT_ID_KEY:
                port.get('device_owner', '')}
        addresses = [port['mac_address'], ]
        addresses += [x['ip_address'] for x in port.get('fixed_ips', [])]
        port_security = (
            addresses + [x['ip_address']
                         for x in port.get('allowed_address_pairs', [])])
        return FakeOVNPort.create_one_port(
            {'external_ids': external_ids, 'addresses': addresses,
             'port_security': port_security})


class FakeOVNRouter():

    @staticmethod
    def create_one_router(attrs=None):
        # Guard against attrs=None before update(); the sibling fakes do
        # the same and update(None) would raise a TypeError.
        attrs = attrs or {}
        router_attrs = {
            'enabled': False,
            'external_ids': {},
            'load_balancer': [],
            'name': '',
            'nat': [],
            'options': {},
            'ports': [],
            'static_routes': [],
        }

        # Overwrite default attributes.
        router_attrs.update(attrs)

        return type('Logical_Router', (object, ), router_attrs)


class FakePort():
    """Fake one or more ports."""

    @staticmethod
    def create_one_port(attrs=None):
        """Create a fake port.

        :param Dictionary attrs: A dictionary with all attributes
        :return: A FakeResource object faking the port
        """
        attrs = attrs or {}

        # Set default attributes.
        fake_uuid = uuidutils.generate_uuid()
        port_attrs = {
            'admin_state_up': True,
            'allowed_address_pairs': [{}],
            'binding:host_id': 'binding-host-id-' + fake_uuid,
            'binding:profile': {},
            'binding:vif_details': {},
            'binding:vif_type': 'ovs',
            'binding:vnic_type': 'normal',
            'device_id': 'device-id-' + fake_uuid,
            'device_owner': 'compute:nova',
            'dns_assignment': [{}],
            'dns_name': 'dns-name-' + fake_uuid,
            'extra_dhcp_opts': [{}],
            'fixed_ips': [{'subnet_id': 'subnet-id-' + fake_uuid,
                           'ip_address': '10.10.10.20'}],
            'id': 'port-id-' + fake_uuid,
            'mac_address': 'fa:16:3e:a9:4e:72',
            'name': 'port-name-' + fake_uuid,
            'network_id': 'network-id-' + fake_uuid,
            'port_security_enabled': True,
            'security_groups': [],
            'status': 'ACTIVE',
            'tenant_id': 'project-id-' + fake_uuid,
        }

        # Overwrite default attributes.
        port_attrs.update(attrs)

        return FakeResource(info=copy.deepcopy(port_attrs),
                            loaded=True)


class FakeLB(data_models.LoadBalancer):
    def __init__(self, *args, **kwargs):
        self.external_ids = kwargs.pop('ext_ids')
        self.uuid = kwargs.pop('uuid')
        super().__init__(*args, **kwargs)

    def __hash__(self):
        # Required for Python3, not for Python2
        return self.__sizeof__()


class FakePool(data_models.Pool):
    def __init__(self, *args, **kwargs):
        self.uuid = kwargs.pop('uuid')
        super().__init__(*args, **kwargs)

    def __hash__(self):
        # Required for Python3, not for Python2
        return self.__sizeof__()


class FakeMember(data_models.Member):
    def __init__(self, *args, **kwargs):
        self.uuid = kwargs.pop('uuid')
        super().__init__(*args, **kwargs)

    def __hash__(self):
        # Required for Python3, not for Python2
        return self.__sizeof__()

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/hacking/__init__.py

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/hacking/test_checks.py
# Copyright 2015
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testtools
from oslotest import base

from ovn_octavia_provider.hacking import checks


class HackingTestCase(base.BaseTestCase):
    """Hacking test class.

    This class tests the hacking checks in
    ovn_octavia_provider.hacking.checks by passing strings to the check
    methods like the pep8/flake8 parser would. The parser loops over each
    line in the file and then passes the parameters to the check method.
    The parameter names in the check method dictate what type of object
    is passed to the check method. The parameter types are::

        logical_line: A processed line with the following modifications:
            - Multi-line statements converted to a single line.
            - Stripped left and right.
            - Contents of strings replaced with "xxx" of same length.
            - Comments removed.
        physical_line: Raw line of text from the input file.
        lines: a list of the raw lines from the input file
        tokens: the tokens that contribute to this logical line
        line_number: line number in the input file
        total_lines: number of lines in the input file
        blank_lines: blank lines before this one
        indent_char: indentation character in this file (" " or "\t")
        indent_level: indentation (with tabs expanded to multiples of 8)
        previous_indent_level: indentation on previous line
        previous_logical: previous logical line
        filename: Path of the file being run through pep8

    When running a test on a check method the return will be False/None if
    there is no violation in the sample input. If there is an error a tuple
    is returned with a position in the line, and a message. So to check the
    result just assertTrue if the check is expected to fail and assertFalse
    if it should pass.
    """

    def assertLinePasses(self, func, *args):
        with testtools.ExpectedException(StopIteration):
            next(func(*args))

    def assertLineFails(self, func, *args):
        self.assertIsInstance(next(func(*args)), tuple)

    def test_assert_called_once_with(self):
        fail_code1 = """
            mock = Mock()
            mock.method(1, 2, 3, test='wow')
            mock.method.assertCalledOnceWith()
            """
        fail_code2 = """
            mock = Mock()
            mock.method(1, 2, 3, test='wow')
            mock.method.called_once_with()
            """
        fail_code3 = """
            mock = Mock()
            mock.method(1, 2, 3, test='wow')
            mock.method.assert_has_called()
            """
        pass_code = """
            mock = Mock()
            mock.method(1, 2, 3, test='wow')
            mock.method.assert_called_once_with()
            """
        pass_code2 = """
            mock = Mock()
            mock.method(1, 2, 3, test='wow')
            mock.method.assert_has_calls()
            """
        self.assertEqual(
            1, len(list(checks.check_assert_called_once_with(
                fail_code1, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            1, len(list(checks.check_assert_called_once_with(
                fail_code2, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assert_called_once_with(
                pass_code, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            1, len(list(checks.check_assert_called_once_with(
                fail_code3, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assert_called_once_with(
                pass_code2, "ovn_octavia_provider/tests/test_assert.py"))))

    def test_asserttruefalse(self):
        true_fail_code1 = """
            test_bool = True
            self.assertEqual(True, test_bool)
            """
        true_fail_code2 = """
            test_bool = True
            self.assertEqual(test_bool, True)
            """
        true_pass_code = """
            test_bool = True
            self.assertTrue(test_bool)
            """
        false_fail_code1 = """
            test_bool = False
            self.assertEqual(False, test_bool)
            """
        false_fail_code2 = """
            test_bool = False
            self.assertEqual(test_bool, False)
            """
        false_pass_code = """
            test_bool = False
            self.assertFalse(test_bool)
            """
        self.assertEqual(
            1, len(list(checks.check_asserttruefalse(
                true_fail_code1,
                "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            1, len(list(checks.check_asserttruefalse(
                true_fail_code2,
                "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_asserttruefalse(
                true_pass_code,
                "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            1, len(list(checks.check_asserttruefalse(
                false_fail_code1,
                "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            1, len(list(checks.check_asserttruefalse(
                false_fail_code2,
                "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertFalse(
            list(checks.check_asserttruefalse(
                false_pass_code,
                "ovn_octavia_provider/tests/test_assert.py")))

    def test_assertempty(self):
        fail_code = """
            test_empty = %s
            self.assertEqual(test_empty, %s)
            """
        pass_code1 = """
            test_empty = %s
            self.assertEqual(%s, test_empty)
            """
        pass_code2 = """
            self.assertEqual(123, foo(abc, %s))
            """
        empty_cases = ['{}', '[]', '""', "''", '()', 'set()']
        for ec in empty_cases:
            self.assertEqual(
                1, len(list(checks.check_assertempty(
                    fail_code % (ec, ec),
                    "ovn_octavia_provider/tests/test_assert.py"))))
            self.assertEqual(
                0, len(list(checks.check_asserttruefalse(
                    pass_code1 % (ec, ec),
                    "ovn_octavia_provider/tests/test_assert.py"))))
            self.assertEqual(
                0, len(list(checks.check_asserttruefalse(
                    pass_code2 % ec,
                    "ovn_octavia_provider/tests/test_assert.py"))))

    def test_assertisinstance(self):
        fail_code = """
            self.assertTrue(isinstance(observed, ANY_TYPE))
            """
        pass_code1 = """
            self.assertEqual(ANY_TYPE, type(observed))
            """
        pass_code2 = """
            self.assertIsInstance(observed, ANY_TYPE)
            """
        self.assertEqual(
            1, len(list(checks.check_assertisinstance(
                fail_code, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assertisinstance(
                pass_code1, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assertisinstance(
                pass_code2, "ovn_octavia_provider/tests/test_assert.py"))))

    def test_assertequal_for_httpcode(self):
        fail_code = """
            self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
            """
        pass_code = """
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
            """
        self.assertEqual(
            1, len(list(checks.check_assertequal_for_httpcode(
                fail_code, "ovn_octavia_provider/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assertequal_for_httpcode(
                pass_code, "ovn_octavia_provider/tests/test_assert.py"))))

    def test_check_no_imports_from_tests(self):
        fail_codes = ('from ovn_octavia_provider import tests',
                      'from ovn_octavia_provider.tests import base',
                      'import ovn_octavia_provider.tests.base')
        for fail_code in fail_codes:
            self.assertEqual(
                1, len(list(checks.check_no_imports_from_tests(
                    fail_code, "ovn_octavia_provider/common/utils.py"))))
            self.assertEqual(
                0, len(list(checks.check_no_imports_from_tests(
                    fail_code,
                    "ovn_octavia_provider/tests/test_fake.py"))))

    def test_check_python3_filter(self):
        f = checks.check_python3_no_filter
        self.assertLineFails(f, "filter(lambda obj: test(obj), data)")
        self.assertLinePasses(f, "[obj for obj in data if test(obj)]")
        self.assertLinePasses(f, "filter(function, range(0,10))")
        self.assertLinePasses(f, "lambda x, y: x+y")

    def test_check_no_import_mock(self):
        pass_line = 'from unittest import mock'
        fail_lines = ('import mock',
                      'import mock as mock_lib',
                      'from mock import patch')
        self.assertEqual(
            0, len(list(checks.check_no_import_mock(
                pass_line, "ovn_octavia_provider/tests/test_fake.py",
                None))))
        for fail_line in fail_lines:
            self.assertEqual(
                0, len(list(checks.check_no_import_mock(
                    fail_line, "ovn_octavia_provider/common/utils.py",
                    None))))
            self.assertEqual(
                1, len(list(checks.check_no_import_mock(
                    fail_line, "ovn_octavia_provider/tests/test_fake.py",
                    None))))
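
# Illustrative sketch for the hacking tests above (hypothetical snippet,
# not part of the archive): each checks.* function is a generator that the
# pep8/flake8 parser drives line by line, and it yields a
# (position, message) tuple only for a violation:
#
#     for pos, msg in checks.check_no_import_mock(
#             'import mock', 'ovn_octavia_provider/tests/test_x.py', None):
#         print(pos, msg)    # a violation was flagged at offset `pos`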
ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/ovsdb/__init__.py

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/ovsdb/test_impl_idl_ovn.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import os
from unittest import mock

from neutron.tests import base
from ovs.db import idl as ovs_idl
from ovsdbapp.backend import ovs_idl as real_ovs_idl
from ovsdbapp.backend.ovs_idl import idlutils

from ovn_octavia_provider.ovsdb import impl_idl_ovn

basedir = os.path.dirname(os.path.abspath(__file__))
schema_files = {
    'OVN_Northbound': os.path.join(basedir, '..', 'schemas',
                                   'ovn-nb.ovsschema'),
    'OVN_Southbound': os.path.join(basedir, '..', 'schemas',
                                   'ovn-sb.ovsschema')}


class TestOvnNbIdlForLb(base.BaseTestCase):

    def setUp(self):
        super().setUp()
        # TODO(haleyb) - figure out why every test in this class generates
        # this warning, think it's in relation to reading this schema file:
        # sys:1: ResourceWarning: unclosed file <_io.FileIO name=1 mode='wb'
        # closefd=True> ResourceWarning: Enable tracemalloc to get the
        # object allocation traceback
        self.mock_gsh = mock.patch.object(
            idlutils, 'get_schema_helper',
            side_effect=lambda x, y: ovs_idl.SchemaHelper(
                location=schema_files['OVN_Northbound'])).start()
        self.idl = impl_idl_ovn.OvnNbIdlForLb()

    def test__get_ovsdb_helper(self):
        self.mock_gsh.reset_mock()
        self.idl._get_ovsdb_helper('foo')
        self.mock_gsh.assert_called_once_with('foo', 'OVN_Northbound')

    @mock.patch.object(real_ovs_idl.Backend, 'autocreate_indices',
                       mock.Mock(), create=True)
    def test_start(self):
        with mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection',
                        side_effect=lambda x, timeout: mock.Mock()):
            idl1 = impl_idl_ovn.OvnNbIdlForLb()
            ret1 = idl1.start()
            id1 = id(ret1.ovsdb_connection)
            idl2 = impl_idl_ovn.OvnNbIdlForLb()
            ret2 = idl2.start()
            id2 = id(ret2.ovsdb_connection)
            self.assertNotEqual(id1, id2)

    @mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection')
    def test_stop(self, mock_conn):
        mock_conn.stop.return_value = False
        with (
                mock.patch.object(
                    self.idl.notify_handler, 'shutdown')) as mock_notify, (
                mock.patch.object(self.idl, 'close')) as mock_close:
            self.idl.start()
            self.idl.stop()
            mock_notify.assert_called_once_with()
            mock_close.assert_called_once_with()

    def test_setlock(self):
        with mock.patch.object(impl_idl_ovn.OvnNbIdlForLb,
                               'set_lock') as set_lock:
            self.idl = impl_idl_ovn.OvnNbIdlForLb(event_lock_name='foo')
        set_lock.assert_called_once_with('foo')


class TestOvnSbIdlForLb(base.BaseTestCase):

    def setUp(self):
        super().setUp()
        # TODO(haleyb) - figure out why every test in this class generates
        # this warning, think it's in relation to reading this schema file:
        # sys:1: ResourceWarning: unclosed file <_io.FileIO name=1 mode='wb'
        # closefd=True> ResourceWarning: Enable tracemalloc to get the
        # object allocation traceback
        self.mock_gsh = mock.patch.object(
            idlutils, 'get_schema_helper',
            side_effect=lambda x, y: ovs_idl.SchemaHelper(
                location=schema_files['OVN_Southbound'])).start()
        self.idl = impl_idl_ovn.OvnSbIdlForLb()

    @mock.patch.object(real_ovs_idl.Backend, 'autocreate_indices',
                       mock.Mock(), create=True)
    def test_start(self):
        with mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection',
                        side_effect=lambda x, timeout: mock.Mock()):
            idl1 = impl_idl_ovn.OvnSbIdlForLb()
            ret1 = idl1.start()
            id1 = id(ret1.ovsdb_connection)
            idl2 = impl_idl_ovn.OvnSbIdlForLb()
            ret2 = idl2.start()
            id2 = id(ret2.ovsdb_connection)
            self.assertNotEqual(id1, id2)

    @mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection')
    def test_stop(self, mock_conn):
        mock_conn.stop.return_value = False
        with (
                mock.patch.object(
                    self.idl.notify_handler, 'shutdown')) as mock_notify, (
                mock.patch.object(self.idl, 'close')) as mock_close:
            self.idl.start()
            self.idl.stop()
            mock_notify.assert_called_once_with()
            mock_close.assert_called_once_with()

    def test_setlock(self):
        with mock.patch.object(impl_idl_ovn.OvnSbIdlForLb,
                               'set_lock') as set_lock:
            self.idl = impl_idl_ovn.OvnSbIdlForLb(event_lock_name='foo')
        set_lock.assert_called_once_with('foo')

ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema
{
    "name": "OVN_Northbound",
    "version": "5.23.0",
    "cksum": "111023208 25806",
    "tables": {
        "NB_Global": {
            "columns": {
                "name": {"type": "string"},
                "nb_cfg": {"type": {"key": "integer"}},
                "sb_cfg": {"type": {"key": "integer"}},
                "hv_cfg": {"type": {"key": "integer"}},
                "external_ids": {
                    "type": {"key": "string", "value": "string",
                             "min": 0, "max": "unlimited"}},
                "connections": {
                    "type": {"key": {"type": "uuid",
                                     "refTable": "Connection"},
                             "min": 0, "max": "unlimited"}},
                "ssl": {
                    "type": {"key": {"type": "uuid", "refTable": "SSL"},
                             "min": 0, "max": 1}},
                "options": {
                    "type": {"key": "string", "value": "string",
                             "min": 0, "max": "unlimited"}},
                "ipsec": {"type": "boolean"}},
            "maxRows": 1,
            "isRoot": true},
        "Logical_Switch": {
            "columns": {
                "name": {"type": "string"},
                "ports": {"type": {"key": {"type": "uuid",
                                           "refTable": "Logical_Switch_Port",
                                           "refType": "strong"},
                                   "min": 0, "max": "unlimited"}},
                "acls": {"type": {"key": {"type": "uuid",
                                          "refTable": "ACL",
                                          "refType": "strong"},
                                  "min": 0, "max": "unlimited"}},
                "qos_rules": {"type": {"key": {"type": "uuid",
                                               "refTable": "QoS",
                                               "refType": "strong"},
                                       "min": 0, "max": "unlimited"}},
                "load_balancer": {"type": {"key": {"type": "uuid",
                                                   "refTable": "Load_Balancer",
                                                   "refType": "weak"},
                                           "min": 0, "max": "unlimited"}},
                "dns_records": {"type": {"key": {"type": "uuid",
                                                 "refTable": "DNS",
                                                 "refType": "weak"},
                                         "min": 0, "max": "unlimited"}},
                "other_config": {
                    "type": {"key": "string", "value": "string",
                             "min": 0, "max": "unlimited"}},
                "external_ids": {
                    "type": {"key": "string", "value": "string",
                             "min": 0, "max": "unlimited"}},
                "forwarding_groups": {
                    "type": {"key": {"type": "uuid",
                                     "refTable": "Forwarding_Group",
                                     "refType": "strong"},
                             "min": 0, "max": "unlimited"}}},
            "isRoot": true},
        "Logical_Switch_Port": {
            "columns": {
                "name": {"type": "string"},
                "type": {"type": "string"},
                "options": {
                    "type": {"key": "string", "value": "string",
                             "min": 0, "max": "unlimited"}},
                "parent_name": {"type": {"key": "string",
                                         "min": 0, "max": 1}},
                "tag_request": {
                    "type": {"key": {"type": "integer",
                                     "minInteger": 0,
"maxInteger": 4095}, "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "dynamic_addresses": {"type": {"key": "string", "min": 0, "max": 1}}, "port_security": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "dhcpv4_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "dhcpv6_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "Forwarding_Group": { "columns": { "name": {"type": "string"}, "vip": {"type": "string"}, "vmac": {"type": "string"}, "liveness": {"type": "boolean"}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "child_port": {"type": {"key": "string", "min": 1, "max": "unlimited"}}}, "isRoot": false}, "Address_Set": { "columns": { "name": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Port_Group": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Switch_Port", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "acls": {"type": {"key": {"type": "uuid", "refTable": "ACL", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Load_Balancer": { "columns": { "name": {"type": "string"}, "vips": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp", "sctp"]]}, "min": 0, "max": 1}}, "health_check": {"type": { "key": {"type": "uuid", "refTable": "Load_Balancer_Health_Check", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ip_port_mappings": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "selection_fields": { "type": {"key": {"type": "string", "enum": ["set", ["eth_src", "eth_dst", "ip_src", "ip_dst", "tp_src", "tp_dst"]]}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Load_Balancer_Health_Check": { "columns": { "vip": {"type": "string"}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "ACL": { "columns": { "name": {"type": {"key": {"type": "string", "maxLength": 63}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "direction": {"type": {"key": {"type": "string", "enum": ["set", ["from-lport", "to-lport"]]}}}, "match": {"type": "string"}, "action": {"type": {"key": {"type": "string", "enum": ["set", ["allow", "allow-related", "drop", "reject"]]}}}, "log": {"type": 
"boolean"}, "severity": {"type": {"key": {"type": "string", "enum": ["set", ["alert", "warning", "notice", "info", "debug"]]}, "min": 0, "max": 1}}, "meter": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "QoS": { "columns": { "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "direction": {"type": {"key": {"type": "string", "enum": ["set", ["from-lport", "to-lport"]]}}}, "match": {"type": "string"}, "action": {"type": {"key": {"type": "string", "enum": ["set", ["dscp"]]}, "value": {"type": "integer", "minInteger": 0, "maxInteger": 63}, "min": 0, "max": "unlimited"}}, "bandwidth": {"type": {"key": {"type": "string", "enum": ["set", ["rate", "burst"]]}, "value": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Meter": { "columns": { "name": {"type": "string"}, "unit": {"type": {"key": {"type": "string", "enum": ["set", ["kbps", "pktps"]]}}}, "bands": {"type": {"key": {"type": "uuid", "refTable": "Meter_Band", "refType": "strong"}, "min": 1, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Meter_Band": { "columns": { "action": {"type": {"key": {"type": "string", "enum": ["set", ["drop"]]}}}, "rate": {"type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}}}, "burst_size": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4294967295}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Logical_Router": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Router_Port", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "static_routes": {"type": {"key": {"type": "uuid", "refTable": "Logical_Router_Static_Route", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "policies": { "type": {"key": {"type": "uuid", "refTable": "Logical_Router_Policy", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "nat": {"type": {"key": {"type": "uuid", "refTable": "NAT", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "load_balancer": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_Router_Port": { "columns": { "name": {"type": "string"}, "gateway_chassis": { "type": {"key": {"type": "uuid", "refTable": "Gateway_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "networks": {"type": {"key": "string", "min": 1, "max": "unlimited"}}, "mac": {"type": "string"}, "peer": {"type": {"key": "string", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "ipv6_ra_configs": { "type": {"key": "string", "value": "string", "min": 0, "max": 
"unlimited"}}, "ipv6_prefix": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "Logical_Router_Static_Route": { "columns": { "ip_prefix": {"type": "string"}, "policy": {"type": {"key": {"type": "string", "enum": ["set", ["src-ip", "dst-ip"]]}, "min": 0, "max": 1}}, "nexthop": {"type": "string"}, "output_port": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Logical_Router_Policy": { "columns": { "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "match": {"type": "string"}, "action": {"type": { "key": {"type": "string", "enum": ["set", ["allow", "drop", "reroute"]]}}}, "nexthop": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "NAT": { "columns": { "external_ip": {"type": "string"}, "external_mac": {"type": {"key": "string", "min": 0, "max": 1}}, "external_port_range": {"type": "string"}, "logical_ip": {"type": "string"}, "logical_port": {"type": {"key": "string", "min": 0, "max": 1}}, "type": {"type": {"key": {"type": "string", "enum": ["set", ["dnat", "snat", "dnat_and_snat" ]]}}}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "DHCP_Options": { "columns": { "cidr": {"type": "string"}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Connection": { "columns": { "target": {"type": "string"}, "max_backoff": {"type": {"key": {"type": "integer", "minInteger": 1000}, "min": 0, "max": 1}}, "inactivity_probe": {"type": {"key": "integer", "min": 0, "max": 1}}, "other_config": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "is_connected": {"type": "boolean", "ephemeral": true}, "status": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}, "ephemeral": true}}, "indexes": [["target"]]}, "DNS": { "columns": { "records": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "SSL": { "columns": { "private_key": {"type": "string"}, "certificate": {"type": "string"}, "ca_cert": {"type": "string"}, "bootstrap_ca_cert": {"type": "boolean"}, "ssl_protocols": {"type": "string"}, "ssl_ciphers": {"type": "string"}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "maxRows": 1}, "Gateway_Chassis": { "columns": { "name": {"type": "string"}, "chassis_name": {"type": "string"}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "HA_Chassis": { "columns": { "chassis_name": {"type": "string"}, "priority": {"type": 
{"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "HA_Chassis_Group": { "columns": { "name": {"type": "string"}, "ha_chassis": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/schemas/ovn-sb.ovsschema0000664000175000017500000006371000000000000030725 0ustar00zuulzuul00000000000000{ "name": "OVN_Southbound", "version": "20.17.0", "cksum": "669123379 26536", "tables": { "SB_Global": { "columns": { "nb_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "connections": { "type": {"key": {"type": "uuid", "refTable": "Connection"}, "min": 0, "max": "unlimited"}}, "ssl": { "type": {"key": {"type": "uuid", "refTable": "SSL"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ipsec": {"type": "boolean"}}, "maxRows": 1, "isRoot": true}, "Chassis": { "columns": { "name": {"type": "string"}, "hostname": {"type": "string"}, "encaps": {"type": {"key": {"type": "uuid", "refTable": "Encap"}, "min": 1, "max": "unlimited"}}, "vtep_logical_switches" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "nb_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "other_config": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "transport_zones" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true, "indexes": [["name"]]}, "Chassis_Private": { "columns": { "name": {"type": "string"}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "nb_cfg": {"type": {"key": "integer"}}, "nb_cfg_timestamp": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true, "indexes": [["name"]]}, "Encap": { "columns": { "type": {"type": {"key": { "type": "string", "enum": ["set", ["geneve", "stt", "vxlan"]]}}}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ip": {"type": "string"}, "chassis_name": {"type": "string"}}, "indexes": [["type", "ip"]]}, "Address_Set": { "columns": { "name": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Port_Group": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Logical_Flow": { "columns": { "logical_datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 0, "max": 1}}, "logical_dp_group": {"type": {"key": {"type": "uuid", "refTable": "Logical_DP_Group"}, "min": 0, "max": 1}}, "pipeline": {"type": {"key": {"type": "string", "enum": ["set", ["ingress", "egress"]]}}}, "table_id": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32}}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 65535}}}, 
"match": {"type": "string"}, "actions": {"type": "string"}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_DP_Group": { "columns": { "datapaths": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Multicast_Group": { "columns": { "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "name": {"type": "string"}, "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 32768, "maxInteger": 65535}}}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Port_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "indexes": [["datapath", "tunnel_key"], ["datapath", "name"]], "isRoot": true}, "Meter": { "columns": { "name": {"type": "string"}, "unit": {"type": {"key": {"type": "string", "enum": ["set", ["kbps", "pktps"]]}}}, "bands": {"type": {"key": {"type": "uuid", "refTable": "Meter_Band", "refType": "strong"}, "min": 1, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Meter_Band": { "columns": { "action": {"type": {"key": {"type": "string", "enum": ["set", ["drop"]]}}}, "rate": {"type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}}}, "burst_size": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4294967295}}}}, "isRoot": false}, "Datapath_Binding": { "columns": { "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}, "load_balancers": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["tunnel_key"]], "isRoot": true}, "Port_Binding": { "columns": { "logical_port": {"type": "string"}, "type": {"type": "string"}, "gateway_chassis": { "type": {"key": {"type": "uuid", "refTable": "Gateway_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 32767}}}, "parent_port": {"type": {"key": "string", "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "virtual_parent": {"type": {"key": "string", "min": 0, "max": 1}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "encap": {"type": {"key": {"type": "uuid", "refTable": "Encap", "refType": "weak"}, "min": 0, "max": 1}}, "mac": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "nat_addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["datapath", "tunnel_key"], ["logical_port"]], "isRoot": true}, "MAC_Binding": { "columns": { "logical_port": {"type": "string"}, "ip": {"type": "string"}, "mac": {"type": "string"}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}}, "indexes": [["logical_port", "ip"]], "isRoot": true}, "DHCP_Options": { "columns": { 
"name": {"type": "string"}, "code": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 254}}}, "type": { "type": {"key": { "type": "string", "enum": ["set", ["bool", "uint8", "uint16", "uint32", "ipv4", "static_routes", "str", "host_id", "domains"]]}}}}, "isRoot": true}, "DHCPv6_Options": { "columns": { "name": {"type": "string"}, "code": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 254}}}, "type": { "type": {"key": { "type": "string", "enum": ["set", ["ipv6", "str", "mac"]]}}}}, "isRoot": true}, "Connection": { "columns": { "target": {"type": "string"}, "max_backoff": {"type": {"key": {"type": "integer", "minInteger": 1000}, "min": 0, "max": 1}}, "inactivity_probe": {"type": {"key": "integer", "min": 0, "max": 1}}, "read_only": {"type": "boolean"}, "role": {"type": "string"}, "other_config": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "is_connected": {"type": "boolean", "ephemeral": true}, "status": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}, "ephemeral": true}}, "indexes": [["target"]]}, "SSL": { "columns": { "private_key": {"type": "string"}, "certificate": {"type": "string"}, "ca_cert": {"type": "string"}, "bootstrap_ca_cert": {"type": "boolean"}, "ssl_protocols": {"type": "string"}, "ssl_ciphers": {"type": "string"}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "maxRows": 1}, "DNS": { "columns": { "records": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "datapaths": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 1, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "RBAC_Role": { "columns": { "name": {"type": "string"}, "permissions": { "type": {"key": {"type": "string"}, "value": {"type": "uuid", "refTable": "RBAC_Permission", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "isRoot": true}, "RBAC_Permission": { "columns": { "table": {"type": "string"}, "authorization": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "insert_delete": {"type": "boolean"}, "update" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Gateway_Chassis": { "columns": { "name": {"type": "string"}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "HA_Chassis": { "columns": { "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "HA_Chassis_Group": { "columns": { "name": {"type": "string"}, "ha_chassis": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ref_chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "external_ids": 
{ "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Controller_Event": { "columns": { "event_type": {"type": {"key": {"type": "string", "enum": ["set", ["empty_lb_backends"]]}}}, "event_info": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "seq_num": {"type": {"key": "integer"}} }, "isRoot": true}, "IP_Multicast": { "columns": { "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "querier": {"type": {"key": "boolean", "min": 0, "max": 1}}, "eth_src": {"type": "string"}, "ip4_src": {"type": "string"}, "ip6_src": {"type": "string"}, "table_size": {"type": {"key": "integer", "min": 0, "max": 1}}, "idle_timeout": {"type": {"key": "integer", "min": 0, "max": 1}}, "query_interval": {"type": {"key": "integer", "min": 0, "max": 1}}, "query_max_resp": {"type": {"key": "integer", "min": 0, "max": 1}}, "seq_no": {"type": "integer"}}, "indexes": [["datapath"]], "isRoot": true}, "IGMP_Group": { "columns": { "address": {"type": "string"}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}, "min": 0, "max": 1}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Port_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "indexes": [["address", "datapath", "chassis"]], "isRoot": true}, "Service_Monitor": { "columns": { "ip": {"type": "string"}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp"]]}, "min": 0, "max": 1}}, "port": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "logical_port": {"type": "string"}, "src_mac": {"type": "string"}, "src_ip": {"type": "string"}, "status": { "type": {"key": {"type": "string", "enum": ["set", ["online", "offline", "error"]]}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["logical_port", "ip", "port", "protocol"]], "isRoot": true}, "Load_Balancer": { "columns": { "name": {"type": "string"}, "vips": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp", "sctp"]]}, "min": 0, "max": 1}}, "datapaths": { "type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "BFD": { "columns": { "src_port": {"type": {"key": {"type": "integer", "minInteger": 49152, "maxInteger": 65535}}}, "disc": {"type": {"key": {"type": "integer"}}}, "logical_port": {"type": "string"}, "dst_ip": {"type": "string"}, "min_tx": {"type": {"key": {"type": "integer"}}}, "min_rx": {"type": {"key": {"type": "integer"}}}, "detect_mult": {"type": {"key": {"type": "integer"}}}, "status": { "type": {"key": {"type": "string", "enum": ["set", ["down", "init", "up", "admin_down"]]}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": 
"unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["logical_port", "dst_ip", "src_port", "disc"]], "isRoot": true}, "FDB": { "columns": { "mac": {"type": "string"}, "dp_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}, "port_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}}, "indexes": [["mac", "dp_key"]], "isRoot": true} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/test_agent.py0000664000175000017500000000220700000000000026665 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from ovn_octavia_provider import agent as ovn_agent from ovn_octavia_provider.tests.unit import base as ovn_base class TestOvnProviderAgent(ovn_base.TestOvnOctaviaBase): def test_exit(self): mock_exit_event = mock.MagicMock() mock_exit_event.is_set.side_effect = [False, False, False, False, True] ovn_agent.OvnProviderAgent(mock_exit_event) self.assertEqual(1, mock_exit_event.wait.call_count) self.assertEqual(2, self.mock_ovn_nb_idl.call_count) self.assertEqual(1, self.mock_ovn_sb_idl.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/test_driver.py0000664000175000017500000010754000000000000027070 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import copy from unittest import mock from octavia_lib.api.drivers import data_models from octavia_lib.api.drivers import exceptions from octavia_lib.common import constants from oslo_utils import uuidutils from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import driver as ovn_driver from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.tests.unit import base as ovn_base class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() self.driver = ovn_driver.OvnProviderDriver() add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper, 'add_request') self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb = mock.MagicMock() self.ovn_lb.name = 'foo_ovn_lb' self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', 'pool_%s' % self.pool_id: self.member_line, 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} self.mock_add_request = add_req_thread.start() self.project_id = uuidutils.generate_uuid() self.fail_member = data_models.Member( address='198.51.100.4', admin_state_up=True, member_id=self.member_id, monitor_address="100.200.200.100", monitor_port=66, name='Amazin', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.ref_member = data_models.Member( address='198.52.100.4', admin_state_up=True, member_id=self.member_id, monitor_address=data_models.Unset, monitor_port=data_models.Unset, name='Amazing', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.update_member = data_models.Member( address='198.53.100.4', admin_state_up=False, member_id=self.member_id, monitor_address=data_models.Unset, monitor_port=data_models.Unset, name='Amazin', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.ref_update_pool = data_models.Pool( admin_state_up=False, description='pool', name='Peter', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], pool_id=self.pool_id, protocol='TCP', session_persistence={'type': 'fix'}) self.ref_pool = data_models.Pool( admin_state_up=True, description='pool', name='Peter', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], pool_id=self.pool_id, protocol='TCP', session_persistence={'type': 'fix'}) self.ref_http_pool = data_models.Pool( admin_state_up=True, description='pool', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], name='Groot', pool_id=self.pool_id, protocol='HTTP', session_persistence={'type': 'fix'}) self.ref_lc_pool = data_models.Pool( admin_state_up=True, description='pool', lb_algorithm=constants.LB_ALGORITHM_LEAST_CONNECTIONS, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], name='Groot', pool_id=self.pool_id, protocol='HTTP', session_persistence={'type': 'fix'}) self.ref_listener = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='TCP', protocol_port=42) self.ref_listener_udp = data_models.Listener( 
admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='UDP', protocol_port=42) self.ref_listener_sctp = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='SCTP', protocol_port=42) self.fail_listener = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='http', protocol_port=42) self.ref_lb_fully_populated = data_models.LoadBalancer( admin_state_up=False, listeners=[self.ref_listener], pools=[self.ref_pool], loadbalancer_id=self.loadbalancer_id, name='favorite_lb0', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id) self.ref_lb0 = data_models.LoadBalancer( admin_state_up=False, listeners=[self.ref_listener], loadbalancer_id=self.loadbalancer_id, name='favorite_lb0', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id) self.ref_lb1 = data_models.LoadBalancer( admin_state_up=True, listeners=[self.ref_listener], loadbalancer_id=self.loadbalancer_id, name='favorite_lb1', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id) self.fail_health_monitor = data_models.HealthMonitor( admin_state_up=True, name='UnHealthy', pool_id=self.pool_id, healthmonitor_id=self.healthmonitor_id, type="not_valid", delay=1, timeout=2, max_retries_down=3, max_retries=4) self.ref_health_monitor = data_models.HealthMonitor( admin_state_up=True, name='Healthy', pool_id=self.pool_id, healthmonitor_id=self.healthmonitor_id, type=constants.HEALTH_MONITOR_TCP, delay=6, timeout=7, max_retries_down=5, max_retries=3) self.ref_update_health_monitor = data_models.HealthMonitor( admin_state_up=True, name='ReHealthy', healthmonitor_id=self.healthmonitor_id, delay=16, timeout=17, max_retries_down=15, max_retries=13) mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lbs', side_effect=lambda x, protocol=None: self.ovn_lb if protocol else [self.ovn_lb]).start() self.mock_find_lb_pool_key = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lb_with_pool_key', return_value=self.ovn_lb).start() self.mock_get_subnet_from_pool = mock.patch.object( ovn_helper.OvnProviderHelper, '_get_subnet_from_pool', return_value=None).start() def test__ip_version_differs(self): self.assertFalse(self.driver._ip_version_differs(self.ref_member)) self.ref_member.address = 'fc00::1' self.assertTrue(self.driver._ip_version_differs(self.ref_member)) def test__ip_version_differs_pool_disabled(self): self.mock_find_lb_pool_key.side_effect = [None, self.ovn_lb] self.driver._ip_version_differs(self.ref_member) self.mock_find_lb_pool_key.assert_has_calls([ mock.call('pool_%s' % self.pool_id), mock.call('pool_%s:D' % self.pool_id)]) def _test_member_create(self, member): info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': self.ref_member.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info} info_dvr = { 'id': self.ref_member.member_id, 'address': 
self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} self.driver.member_create(member) expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.mock_add_request.assert_has_calls(expected) def test_member_create(self): self._test_member_create(self.ref_member) def test_member_create_failure(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.fail_member) def test_member_create_different_ip_version(self): self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_different_ip_version_lb_disable(self): self.driver._ovn_helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.driver._ovn_helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) def test_member_create_no_subnet_provided(self): self.ref_member.subnet_id = data_models.UnsetType() self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.ref_member.subnet_id = None self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_no_subnet_provided_get_from_pool(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id) member = copy.copy(self.ref_member) member.subnet_id = data_models.UnsetType() self._test_member_create(member) member.subnet_id = None self._test_member_create(member) def test_member_create_monitor_opts(self): self.ref_member.monitor_address = '172.20.20.1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.ref_member.monitor_port = '80' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_no_set_admin_state_up(self): self.ref_member.admin_state_up = data_models.UnsetType() info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info} expected_dict_dvr = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': mock.ANY} expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.driver.member_create(self.ref_member) self.mock_add_request.assert_has_calls(expected) def test_member_update(self): info = {'id': self.update_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'admin_state_up': self.update_member.admin_state_up, 'old_admin_state_up': self.ref_member.admin_state_up, 'subnet_id': self.ref_member.subnet_id} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': info} self.driver.member_update(self.ref_member, self.update_member) self.mock_add_request.assert_called_once_with(expected_dict) @mock.patch.object(ovn_driver.OvnProviderDriver, '_ip_version_differs') def test_member_update_no_ip_addr(self, mock_ip_differs): self.update_member.address = None 
self.driver.member_update(self.ref_member, self.update_member) mock_ip_differs.assert_not_called() def test_member_batch_update(self): self.driver.member_batch_update(self.pool_id, [self.ref_member, self.update_member]) self.assertEqual(self.mock_add_request.call_count, 3) def test_member_batch_update_no_members(self): pool_key = 'pool_%s' % self.pool_id ovn_lb = copy.copy(self.ovn_lb) ovn_lb.external_ids[pool_key] = [] self.mock_find_lb_pool_key.return_value = ovn_lb self.driver.member_batch_update(self.pool_id, [self.ref_member, self.update_member]) self.assertEqual(self.mock_add_request.call_count, 2) def test_member_batch_update_skipped_monitor(self): self.ref_member.monitor_address = '10.11.1.1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_batch_update_skipped_mixed_ip(self): self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_batch_update_unset_admin_state_up(self): self.ref_member.admin_state_up = data_models.UnsetType() self.driver.member_batch_update(self.pool_id, [self.ref_member]) self.assertEqual(self.mock_add_request.call_count, 2) def test_member_batch_update_missing_subnet_id(self): self.ref_member.subnet_id = None self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_update_failure(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_update, self.ref_member, self.fail_member) def test_member_update_different_ip_version(self): self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_update, self.ref_member, self.ref_member) def test_member_delete(self): info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': info} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} self.driver.member_delete(self.ref_member) expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.mock_add_request.assert_has_calls(expected) def test_listener_create(self): info = {'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_create_unset_admin_state_up(self): self.ref_listener.admin_state_up = data_models.UnsetType() info = {'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': True, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} 
self.driver.listener_create(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_create_unsupported_protocol(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.listener_create, self.fail_listener) def test_listener_create_multiple_protocols(self): self.ovn_lb.protocol = ['TCP'] info = {'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) self.ovn_lb.protocol = ['UDP'] info['protocol'] = 'UDP' expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) self.ovn_lb.protocol = ['SCTP'] info['protocol'] = 'SCTP' expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) def test_listener_update(self): info = {'id': self.ref_listener.listener_id, 'protocol_port': self.ref_listener.protocol_port, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} if self.ref_listener.default_pool_id: info['default_pool_id'] = self.ref_listener.default_pool_id expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_UPDATE, 'info': info} self.driver.listener_update(self.ref_listener, self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_delete(self): info = {'id': self.ref_listener.listener_id, 'protocol_port': self.ref_listener.protocol_port, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_DELETE, 'info': info} self.driver.listener_delete(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_fully_populate_create(self): info = { 'id': self.ref_lb_fully_populated.loadbalancer_id, 'vip_address': self.ref_lb_fully_populated.vip_address, 'vip_network_id': self.ref_lb_fully_populated.vip_network_id, 'admin_state_up': self.ref_lb_fully_populated.admin_state_up} info_listener = { 'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} info_pool = { 'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'listener_id': self.ref_pool.listener_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': self.ref_pool.admin_state_up} info_member = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': self.ref_member.admin_state_up} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} expected_lb_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} 
expected_listener_dict = { 'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info_listener} expected_pool_dict = { 'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info_pool} expected_member_dict = { 'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info_member} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} calls = [mock.call(expected_lb_dict), mock.call(expected_listener_dict), mock.call(expected_pool_dict), mock.call(expected_member_dict), mock.call(expected_dict_dvr)] self.driver.loadbalancer_create(self.ref_lb_fully_populated) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_create(self): info = {'id': self.ref_lb0.loadbalancer_id, 'vip_address': self.ref_lb0.vip_address, 'vip_network_id': self.ref_lb0.vip_network_id, 'admin_state_up': self.ref_lb0.admin_state_up} expected_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} calls = [mock.call(expected_dict)] self.driver.loadbalancer_create(self.ref_lb0) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_create_unset_admin_state_up(self): self.ref_lb0.admin_state_up = data_models.UnsetType() info = {'id': self.ref_lb0.loadbalancer_id, 'vip_address': self.ref_lb0.vip_address, 'vip_network_id': self.ref_lb0.vip_network_id, 'admin_state_up': True} expected_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} calls = [mock.call(expected_dict)] self.driver.loadbalancer_create(self.ref_lb0) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_update(self): info = {'id': self.ref_lb1.loadbalancer_id, 'admin_state_up': self.ref_lb1.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_LB_UPDATE, 'info': info} self.driver.loadbalancer_update(self.ref_lb0, self.ref_lb1) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_delete(self): info = {'id': self.ref_lb0.loadbalancer_id, 'cascade': False} expected_dict = {'type': ovn_const.REQ_TYPE_LB_DELETE, 'info': info} self.driver.loadbalancer_delete(self.ref_lb1) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_failover(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.loadbalancer_failover, self.ref_lb0.loadbalancer_id) def test_pool_create_unsupported_protocol(self): self.ref_pool.protocol = 'HTTP' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.pool_create, self.ref_pool) def test_pool_create_leastcount_algo(self): self.ref_pool.lb_algorithm = constants.LB_ALGORITHM_LEAST_CONNECTIONS self.assertRaises(exceptions.UnsupportedOptionError, self.driver.pool_create, self.ref_pool) def test_pool_create(self): info = {'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'listener_id': self.ref_pool.listener_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': self.ref_pool.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info} self.driver.pool_create(self.ref_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_create_unset_admin_state_up(self): self.ref_pool.admin_state_up = data_models.UnsetType() info = {'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'listener_id': self.ref_pool.listener_id, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info} self.driver.pool_create(self.ref_pool) 
self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_delete(self): # Pretend we don't have members self.ref_pool.members = [] info = {'id': self.ref_pool.pool_id, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_pool.loadbalancer_id} expected = {'type': ovn_const.REQ_TYPE_POOL_DELETE, 'info': info} self.driver.pool_delete(self.ref_pool) self.mock_add_request.assert_called_once_with(expected) def test_pool_delete_with_members(self): info = {'id': self.ref_pool.pool_id, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_pool.loadbalancer_id} expected = {'type': ovn_const.REQ_TYPE_POOL_DELETE, 'info': info} info_member = {'id': self.ref_member.member_id, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'protocol_port': self.ref_member.protocol_port, 'address': self.ref_member.address} expected_members = { 'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': info_member} expected_members_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': mock.ANY} calls = [mock.call(expected_members), mock.call(expected_members_dvr), mock.call(expected)] self.driver.pool_delete(self.ref_pool) self.mock_add_request.assert_has_calls(calls) def test_pool_update(self): info = {'id': self.ref_update_pool.pool_id, 'loadbalancer_id': self.ref_update_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_update_pool.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': info} self.driver.pool_update(self.ref_pool, self.ref_update_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_create_vip_port(self): with mock.patch.object(clients, 'get_neutron_client'): port_dict = self.driver.create_vip_port(self.loadbalancer_id, self.project_id, self.vip_dict) self.assertIsNotNone(port_dict.pop('vip_address', None)) self.assertIsNotNone(port_dict.pop('vip_port_id', None)) # The network_driver function is mocked, therefore the # created port vip_address and vip_port_id are also mocked. # Check that they exist and move on. # The final output includes vip_address, vip_port_id, # vip_network_id and vip_subnet_id.
for key, value in port_dict.items(): self.assertEqual(value, self.vip_output[key]) def test_create_vip_port_exception(self): with mock.patch.object(clients, 'get_neutron_client', side_effect=[RuntimeError]): self.assertRaises( exceptions.DriverError, self.driver.create_vip_port, self.loadbalancer_id, self.project_id, self.vip_dict) def test_health_monitor_create(self): info = {'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': self.ref_health_monitor.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': info} self.driver.health_monitor_create(self.ref_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) @mock.patch.object(ovn_driver.OvnProviderDriver, '_is_health_check_supported') def test_health_monitor_create_not_supported(self, ihcs): ihcs.return_value = False self.assertRaises(exceptions.UnsupportedOptionError, self.driver.health_monitor_create, self.ref_health_monitor) def test_health_monitor_create_failure(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.health_monitor_create, self.fail_health_monitor) def test_health_monitor_create_failure_unset_type(self): self.fail_health_monitor.type = data_models.UnsetType() self.assertRaises(exceptions.UnsupportedOptionError, self.driver.health_monitor_create, self.fail_health_monitor) def test_health_monitor_create_unset_admin_state_up(self): self.ref_health_monitor.admin_state_up = data_models.UnsetType() info = {'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': info} self.driver.health_monitor_create(self.ref_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) def test_health_monitor_update(self): info = {'id': self.ref_update_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'interval': self.ref_update_health_monitor.delay, 'timeout': self.ref_update_health_monitor.timeout, 'failure_count': self.ref_update_health_monitor.max_retries_down, 'success_count': self.ref_update_health_monitor.max_retries, 'admin_state_up': self.ref_update_health_monitor.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_HM_UPDATE, 'info': info} self.driver.health_monitor_update(self.ref_health_monitor, self.ref_update_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) def test_health_monitor_update_unset_admin_state_up(self): self.ref_update_health_monitor.admin_state_up = data_models.UnsetType() info = {'id': self.ref_update_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'interval': self.ref_update_health_monitor.delay, 'timeout': self.ref_update_health_monitor.timeout, 'failure_count': self.ref_update_health_monitor.max_retries_down, 'success_count': self.ref_update_health_monitor.max_retries, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_HM_UPDATE, 'info': info} 
self.driver.health_monitor_update(self.ref_health_monitor, self.ref_update_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) def test_health_monitor_delete(self): info = {'id': self.ref_health_monitor.healthmonitor_id} expected_dict = {'type': ovn_const.REQ_TYPE_HM_DELETE, 'info': info} self.driver.health_monitor_delete(self.ref_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/test_hacking.py0000664000175000017500000000133700000000000027176 0ustar00zuulzuul00000000000000# Copyright 2020 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class NothingTestCase(base.BaseTestCase): """Nothing test class""" def test_nothing(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider/tests/unit/test_helper.py0000664000175000017500000044630600000000000027062 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
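# NOTE: The tests in this module drive OvnProviderHelper against
# fully mocked ovsdbapp (ovn_nbdb_api) and neutronclient objects,
# so no running OVN northbound database or Neutron service is
# needed to execute them.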
# import copy from unittest import mock from neutron_lib import constants as n_const from neutronclient.common import exceptions as n_exc from octavia_lib.api.drivers import data_models from octavia_lib.api.drivers import exceptions from octavia_lib.common import constants from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import idlutils from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import event as ovn_event from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.tests.unit import base as ovn_base from ovn_octavia_provider.tests.unit import fakes class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() self.helper = ovn_helper.OvnProviderHelper() self.real_helper_find_ovn_lb_with_pool_key = ( self.helper._find_ovn_lb_with_pool_key) mock.patch.object(self.helper, '_update_status_to_octavia').start() self.octavia_driver_lib = mock.patch.object( self.helper, '_octavia_driver_lib').start() self.listener = {'id': self.listener_id, 'loadbalancer_id': self.loadbalancer_id, 'protocol': 'TCP', 'protocol_port': 80, 'default_pool_id': self.pool_id, 'admin_state_up': False} self.lb = {'id': self.loadbalancer_id, 'vip_address': self.vip_address, 'cascade': False, 'vip_network_id': self.vip_network_id, 'admin_state_up': False} self.ports = {'ports': [{ 'fixed_ips': [{'ip_address': self.vip_address, 'subnet_id': uuidutils.generate_uuid()}], 'network_id': self.vip_network_id, 'id': self.port1_id}]} self.pool = {'id': self.pool_id, 'loadbalancer_id': self.loadbalancer_id, 'listener_id': self.listener_id, 'protocol': 'TCP', 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': False} self.member = {'id': self.member_id, 'address': self.member_address, 'protocol_port': self.member_port, 'subnet_id': self.member_subnet_id, 'pool_id': self.member_pool_id, 'admin_state_up': True, 'old_admin_state_up': True} self.health_monitor = {'id': self.healthmonitor_id, 'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_TCP, 'interval': 6, 'timeout': 7, 'failure_count': 5, 'success_count': 3, 'admin_state_up': True} self.health_mon_udp = {'id': self.healthmonitor_id, 'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'interval': 6, 'timeout': 7, 'failure_count': 5, 'success_count': 3, 'admin_state_up': True} self.ovn_nbdb_api = mock.patch.object(self.helper, 'ovn_nbdb_api') self.ovn_nbdb_api.start() add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper, 'add_request') self.mock_add_request = add_req_thread.start() self.ovn_lb = mock.MagicMock() self.ovn_lb.protocol = ['tcp'] self.ovn_lb.uuid = uuidutils.generate_uuid() self.ovn_lb.health_check = [] self.ovn_hm_lb = mock.MagicMock() self.ovn_hm_lb.protocol = ['tcp'] self.ovn_hm_lb.uuid = uuidutils.generate_uuid() self.ovn_hm_lb.health_check = [] self.ovn_hm = mock.MagicMock() self.ovn_hm.uuid = self.healthmonitor_id self.ovn_hm.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: self.ovn_hm.uuid} self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', 'enabled': True, 'pool_%s' % self.pool_id: self.member_line, 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} self.ovn_hm_lb.external_ids = { 
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.99', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.99', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_hm_port', 'enabled': True, 'pool_%s' % self.pool_id: [], 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} self.helper.ovn_nbdb_api.db_find.return_value.\ execute.return_value = [self.ovn_lb] self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.return_value = [self.ovn_lb] self.mock_find_lb_pool_key = mock.patch.object( self.helper, '_find_ovn_lb_with_pool_key', return_value=self.ovn_lb).start() self.mock_find_ovn_lbs = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lbs', side_effect=lambda x, protocol=None: self.ovn_lb if protocol else [self.ovn_lb]) self.mock_find_ovn_lbs.start() mock.patch.object(self.helper, '_get_pool_listeners', return_value=[]).start() self._update_lb_to_ls_association = mock.patch.object( self.helper, '_update_lb_to_ls_association', return_value=[]) self._update_lb_to_ls_association.start() self._update_lb_to_lr_association = mock.patch.object( self.helper, '_update_lb_to_lr_association', return_value=[]) self._update_lb_to_lr_association.start() # NOTE(mjozefcz): Create foo router and network. net_id = uuidutils.generate_uuid() router_id = uuidutils.generate_uuid() self.ref_lb1 = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='favorite_lb1', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, ext_ids={ ovn_const.LB_EXT_IDS_LR_REF_KEY: 'neutron-%s' % net_id, ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\": 1}' % net_id}) self.ref_lb2 = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='favorite_lb2', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, ext_ids={ ovn_const.LB_EXT_IDS_LR_REF_KEY: 'neutron-%s' % net_id, ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\": 1}' % net_id}) # TODO(mjozefcz): Consider using FakeOVNRouter. self.router = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'load_balancer': [self.ref_lb1], 'name': 'neutron-%s' % router_id, 'ports': []}) # TODO(mjozefcz): Consider using FakeOVNSwitch. self.network = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'load_balancer': [self.ref_lb2], 'name': 'neutron-%s' % net_id, 'ports': [], 'uuid': net_id}) self.mock_get_nw = mock.patch.object( self.helper, '_get_nw_router_info_on_interface_event', return_value=(self.router, self.network)) self.mock_get_nw.start() (self.helper.ovn_nbdb_api.ls_get.return_value. 
execute.return_value) = self.network def test__is_lb_empty(self): f = self.helper._is_lb_empty self.assertFalse(f(self.ovn_lb.external_ids)) self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.assertFalse(f(self.ovn_lb.external_ids)) self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id) self.assertTrue(f(self.ovn_lb.external_ids)) def test__delete_disabled_from_status(self): f = self.helper._delete_disabled_from_status status = { 'pools': [ {'id': 'f:D', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'members': [ {'id': 'foo:D', 'provisioning_status': 'ACTIVE'}]} expected = { 'pools': [ {'id': 'f', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'members': [ {'id': 'foo', 'provisioning_status': 'ACTIVE'}]} self.assertEqual(f(status), expected) self.assertEqual(f(expected), expected) status = {} self.assertFalse(f(status)) def test__find_ovn_lb_with_pool_key(self): pool_key = self.helper._get_pool_key(uuidutils.generate_uuid()) test_lb = mock.MagicMock() test_lb.external_ids = { ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: ovn_const.PORT_FORWARDING_PLUGIN, pool_key: 'it_is_a_pool_party', } self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.return_value = [test_lb] f = self.real_helper_find_ovn_lb_with_pool_key # Ensure lb is not found, due to its device owner found = f(pool_key) self.assertIsNone(found) # Remove device owner from test_lb.external_ids and make sure test_lb # is found as expected test_lb.external_ids.pop(ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) found = f(pool_key) self.assertEqual(found, test_lb) def test__find_ovn_lbs(self): self.mock_find_ovn_lbs.stop() f = self.helper._find_ovn_lbs self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] # Without protocol specified return a list found = f(self.ovn_lb.id) self.assertListEqual(found, [self.ovn_lb]) self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with( 'Load_Balancer', ('name', '=', self.ovn_lb.id)) self.helper.ovn_nbdb_api.db_find_rows.reset_mock() # With protocol specified return an instance found = f(self.ovn_lb.id, protocol='tcp') self.assertEqual(found, self.ovn_lb) self.helper.ovn_nbdb_api.db_find_rows.reset_mock() # LB with given protocol not found self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises( idlutils.RowNotFound, f, self.ovn_lb.id, protocol='UDP') # LB with given protocol not found self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises( idlutils.RowNotFound, f, self.ovn_lb.id, protocol='SCTP') # Multiple protocols udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] sctp_lb = copy.copy(self.ovn_lb) sctp_lb.protocol = ['sctp'] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb, udp_lb, sctp_lb] found = f(self.ovn_lb.id) self.assertListEqual(found, [self.ovn_lb, udp_lb, sctp_lb]) def test__get_subnet_from_pool(self): f = self.helper._get_subnet_from_pool lb = data_models.LoadBalancer( loadbalancer_id=self.loadbalancer_id, name='The LB', vip_address=self.vip_address, vip_subnet_id=self.vip_subnet_id, vip_network_id=self.vip_network_id) lb_pool = data_models.Pool( loadbalancer_id=self.loadbalancer_id, name='The pool', pool_id=self.pool_id, protocol='TCP') with mock.patch.object(self.helper, '_octavia_driver_lib') as dlib: dlib.get_pool.return_value = None found = f('not_found') self.assertIsNone(found) dlib.get_pool.return_value = lb_pool dlib.get_loadbalancer.return_value = lb found = 
f(self.pool_id) self.assertEqual(found, lb.vip_subnet_id) def test__get_or_create_ovn_lb_no_lb_found(self): self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises( idlutils.RowNotFound, self.helper._get_or_create_ovn_lb, self.ovn_lb.name, protocol='TCP', admin_state_up='True') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test__get_or_create_ovn_lb_required_proto_not_found(self, lbc): udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[udp_lb], [self.ovn_lb]] self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') expected_lb_info = { 'id': self.ovn_lb.name, 'protocol': 'tcp', 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'vip_address': udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY), 'vip_port_id': udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY), ovn_const.LB_EXT_IDS_LR_REF_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY), ovn_const.LB_EXT_IDS_LS_REFS_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY), 'admin_state_up': 'True', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY)} lbc.assert_called_once_with(expected_lb_info, protocol='tcp') def test__get_or_create_ovn_lb_found(self): self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] found = self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') self.assertEqual(found, self.ovn_lb) def test__get_or_create_ovn_lb_lb_without_protocol(self): self.mock_find_ovn_lbs.stop() self.ovn_lb.protocol = [] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] found = self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') self.assertEqual(found, self.ovn_lb) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('protocol', 'tcp')) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_disabled(self, net_cli): self.lb['admin_state_up'] = False net_cli.return_value.list_ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'False'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_enabled(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.list_ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 
'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_selection_fields_not_supported(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.list_ports.return_value = self.ports self.helper._are_selection_fields_supported = ( mock.Mock(return_value=False)) status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[]) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_selection_fields_not_supported_algo(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.list_ports.return_value = self.ports net_cli.return_value.show_subnet.return_value = { 'subnet': mock.MagicMock()} self.pool['lb_algorithm'] = 'foo' status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) # NOTE(mjozefcz): Make sure that we use the same selection # fields as for default algorithm - source_ip_port. self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def _test_lb_create_on_multi_protocol(self, protocol, net_cli): """Test the situation when a new protocol is added to the same load balancer and an additional OVN LB with the same name must be created.
""" self.lb['admin_state_up'] = True self.lb['protocol'] = protocol self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo' self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = '{\"neutron-foo\": 1}' net_cli.return_value.list_ports.return_value = self.ports status = self.helper.lb_create(self.lb, protocol=protocol) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo', 'enabled': 'True'}, name=mock.ANY, protocol=protocol.lower(), selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) self.helper._update_lb_to_ls_association.assert_has_calls([ mock.call(self.ovn_lb, associate=True, network_id=self.lb['vip_network_id']), mock.call(self.ovn_lb, associate=True, network_id='foo')]) def test_lb_create_on_multi_protocol_UDP(self): self._test_lb_create_on_multi_protocol('UDP') def test_lb_create_on_multi_protocol_SCTP(self): self._test_lb_create_on_multi_protocol('SCTP') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port') def test_lb_create_exception(self, del_port, net_cli): self.helper._find_ovn_lbs.side_effect = [RuntimeError] net_cli.return_value.list_ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) del_port.assert_called_once_with(self.ports.get('ports')[0]['id']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port') def test_lb_delete(self, del_port, net_cli): net_cli.return_value.delete_port.return_value = None status = self.helper.lb_delete(self.ovn_lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port') def test_lb_delete_row_not_found(self, del_port): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_not_called() del_port.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port') def test_lb_delete_exception(self, del_port): self.helper.ovn_nbdb_api.lb_del.side_effect = [RuntimeError] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port') def test_lb_delete_port_not_found(self, del_port, net_cli): 
net_cli.return_value.delete_port.side_effect = ( [n_exc.PortNotFoundClient]) status = self.helper.lb_delete(self.ovn_lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_delete_cascade(self, net_cli): net_cli.return_value.delete_port.return_value = None self.lb['cascade'] = True status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_delete_ls_lr(self, net_cli): self.ovn_lb.external_ids.update({ ovn_const.LB_EXT_IDS_LR_REF_KEY: self.router.name, ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\": 1}' % self.network.uuid}) net_cli.return_value.delete_port.return_value = None (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.helper.ovn_nbdb_api.lookup.return_value = self.router self.helper.lb_delete(self.ovn_lb) self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with( self.network.uuid, self.ovn_lb.uuid) self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with( self.router.uuid, self.ovn_lb.uuid) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_delete_multiple_protocols(self, net_cli): net_cli.return_value.delete_port.return_value = None self.mock_find_ovn_lbs.stop() udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] udp_lb.uuid = 'foo_uuid' self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb, udp_lb] self.helper.lb_delete(self.lb) self.helper.ovn_nbdb_api.lb_del.assert_has_calls([ mock.call(self.ovn_lb.uuid), mock.call(udp_lb.uuid)]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_lb_update_disabled(self, refresh_vips): self.lb['admin_state_up'] = False status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'False'})) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_lb_update_enabled(self, refresh_vips): # Change the mock, it's enabled by default.
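# The 'enabled' flag lives in the OVN row's external_ids as a
# string; lb_update is expected to flip it to 'True' and refresh
# the vips column via _refresh_lb_vips, as asserted below.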
self.ovn_lb.external_ids.update({'enabled': False}) self.lb['admin_state_up'] = True status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'True'})) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_lb_update_enabled_multiple_protocols(self, refresh_vips): self.mock_find_ovn_lbs.stop() self.ovn_lb.external_ids.update({'enabled': 'False'}) udp_lb = copy.deepcopy(self.ovn_lb) udp_lb.protocol = ['udp'] udp_lb.uuid = 'foo_uuid' self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb, udp_lb] self.lb['admin_state_up'] = True status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) refresh_vips.assert_has_calls([ mock.call(self.ovn_lb.uuid, self.ovn_lb.external_ids), mock.ANY, mock.ANY, mock.call(udp_lb.uuid, udp_lb.external_ids)], any_order=False) self.helper.ovn_nbdb_api.db_set.assert_has_calls([ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'True'})), mock.call('Load_Balancer', udp_lb.uuid, ('external_ids', {'enabled': 'True'}))]) def test_lb_update_exception(self): self.helper._find_ovn_lbs.side_effect = [RuntimeError] status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) def test_lb_update_no_admin_state_up(self): self.lb.pop('admin_state_up') status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.helper._find_ovn_lbs.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_create_disabled(self, refresh_vips): self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) status = self.helper.listener_create(self.listener) # Set expected as disabled self.ovn_lb.external_ids.update({ 'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id}) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) expected_calls = [ mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('protocol', 'tcp'))] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_calls) self.assertEqual( len(expected_calls), self.helper.ovn_nbdb_api.db_set.call_count) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_create_enabled(self, refresh_vips): self.listener['admin_state_up'] = True status = self.helper.listener_create(self.listener) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) expected_calls = [ mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s' % 
self.listener_id: '80:pool_%s' % self.pool_id}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_calls) self.assertEqual( len(expected_calls), self.helper.ovn_nbdb_api.db_set.call_count) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) def test_listener_create_no_default_pool(self): self.listener['admin_state_up'] = True self.listener.pop('default_pool_id') self.helper.listener_create(self.listener) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s' % self.listener_id: '80:'})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) self.assertEqual( len(expected_calls), self.helper.ovn_nbdb_api.db_set.call_count) def test_listener_create_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError] status = self.helper.listener_create(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['operating_status'], constants.ERROR) def test_listener_update(self): status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.listener['admin_state_up'] = True status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) def test_listener_update_row_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_exception(self, refresh_vips): refresh_vips.side_effect = [RuntimeError] status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_listener_enabled(self, refresh_vips): self.listener['admin_state_up'] = True # Update the listener port. 
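# Listener entries in external_ids follow the pattern
# 'listener_<listener_id>' -> '<protocol_port>:pool_<pool_id>',
# so changing the port should rewrite the value to
# '123:pool_<pool_id>'.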
self.listener.update({'protocol_port': 123}) status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s' % self.listener_id: '123:pool_%s' % self.pool_id})) # Update the expected listener entry, because the port was changed. self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.ovn_lb.external_ids.update( {'listener_%s' % self.listener_id: '123:pool_%s' % self.pool_id}) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_listener_disabled(self, refresh_vips): self.listener['admin_state_up'] = False status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) # It gets disabled, so update the key self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.ovn_lb.external_ids.update( {'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id}) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_no_admin_state_up(self, refresh_vips): self.listener.pop('admin_state_up') status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_no_admin_state_up_or_default_pool_id( self, refresh_vips): self.listener.pop('admin_state_up') self.listener.pop('default_pool_id') status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() refresh_vips.assert_not_called() def test_listener_delete_no_external_id(self): self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() def test_listener_delete_row_not_found(self):
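# A missing OVN Load_Balancer row is treated as already deleted:
# listener_delete should still report DELETED/OFFLINE instead of
# raising.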
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) def test_listener_delete_exception(self): self.helper.ovn_nbdb_api.db_remove.side_effect = [RuntimeError] status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['operating_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_delete_external_id(self, refresh_vips): status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) refresh_vips.assert_called_once_with( self.ovn_lb.uuid, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_not_empty(self, lb_empty): lb_empty.return_value = False self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_empty_octavia_lb_empty(self, lb_empty): """Test the situation when the OVN and Octavia LBs are empty. Tests the situation when both the OVN and Octavia LBs are empty, but the OVN LB row cannot be removed. """ lb_empty.return_value = True self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() # Assert that protocol has been set to []. self.helper.ovn_nbdb_api.db_set.assert_has_calls([ mock.call('Load_Balancer', self.ovn_lb.uuid, ('protocol', []))]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_empty_octavia_lb_not_empty(self, lb_empty): """Test that we can remove one LB whose protocol is no longer used""" ovn_lb_udp = copy.copy(self.ovn_lb) ovn_lb_udp.protocol = ['udp'] self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[self.ovn_lb], [self.ovn_lb, ovn_lb_udp]] lb_empty.return_value = True self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) # Validate that the vips column hasn't been touched, because # the previous command removed the LB, so there is no need # to update it.
self.helper.ovn_nbdb_api.db_set.assert_not_called() def test_pool_create(self): status = self.helper.pool_create(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.pool['admin_state_up'] = True # Pool operating status shouldn't change if no member is present. status = self.helper.pool_create(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) # Pool without listener set should be OFFLINE self.pool['listener_id'] = None status = self.helper.pool_create(self.pool) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) def test_pool_create_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError] status = self.helper.pool_create(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) def test_pool_update(self): status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) self.pool['admin_state_up'] = True status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) def test_pool_update_exception_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.pool_update(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def test_pool_update_exception(self): self.helper._get_pool_listeners.side_effect = [RuntimeError] status = self.helper.pool_update(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def test_pool_update_unset_admin_state_up(self): self.pool.pop('admin_state_up') status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) def test_pool_update_pool_disabled_change_to_up(self): self.pool.update({'admin_state_up': True}) disabled_p_key = self.helper._get_pool_key(self.pool_id, is_enabled=False) p_key = self.helper._get_pool_key(self.pool_id) self.ovn_lb.external_ids.update({ disabled_p_key: self.member_line}) self.ovn_lb.external_ids.pop(p_key) status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {'10.22.33.4:80': '192.168.2.149:1010',
'123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_update_pool_up_change_to_disabled(self): self.pool.update({'admin_state_up': False}) status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s:D' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_update_listeners(self): self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.pool_update(self.pool) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) def test_pool_delete(self): status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'vips') self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {})), mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', 'enabled': True, 'listener_%s' % self.listener_id: '80:'}))] self.assertEqual(self.helper.ovn_nbdb_api.db_set.call_count, len(expected_calls)) self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_delete_row_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.db_remove.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_not_called() def test_pool_delete_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError] status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def test_pool_delete_associated_listeners(self): self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.pool_delete(self.pool) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_set.assert_called_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'enabled': True, 'listener_%s' % self.listener_id: '80:', ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port'})) def test_pool_delete_pool_disabled(self): disabled_p_key = self.helper._get_pool_key(self.pool_id, is_enabled=False) p_key = self.helper._get_pool_key(self.pool_id) self.ovn_lb.external_ids.update({ disabled_p_key: self.member_line}) self.ovn_lb.external_ids.pop(p_key) status = self.helper.pool_delete(self.pool) 
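# Disabled pools are keyed as 'pool_<pool_id>:D' in external_ids,
# so the delete path is expected to remove that disabled key, as
# asserted below.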
self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s:D' % self.pool_id) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_pool_delete_ovn_lb_not_empty(self, lb_empty): lb_empty.return_value = False self.helper.pool_delete(self.pool) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_pool_delete_ovn_lb_empty_lb_empty(self, lb_empty): lb_empty.return_value = True self.helper.pool_delete(self.pool) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() # Assert that protocol has been set to []. self.helper.ovn_nbdb_api.db_set.assert_called_with( 'Load_Balancer', self.ovn_lb.uuid, ('protocol', [])) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_pool_delete_ovn_lb_empty_lb_not_empty(self, lb_empty): ovn_lb_udp = copy.copy(self.ovn_lb) self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[self.ovn_lb], [self.ovn_lb, ovn_lb_udp]] lb_empty.return_value = True self.helper.pool_delete(self.pool) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create(self, net_cli): net_cli.return_value.show_subnet.side_effect = [ idlutils.RowNotFound, idlutils.RowNotFound] self.ovn_lb.external_ids = mock.MagicMock() status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.member['admin_state_up'] = False status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.OFFLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_lb_add_from_lr(self, net_cli, f_lr, folbpi): fake_subnet = fakes.FakeSubnet.create_one_subnet() net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet} f_lr.return_value = self.router pool_key = 'pool_%s' % self.pool_id folbpi.return_value = (pool_key, self.ovn_lb) self.ovn_lb.external_ids = mock.MagicMock() status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) f_lr.assert_called_once_with(self.network, fake_subnet['gateway_ip']) 
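# When the member subnet sits behind a logical router, the helper
# is also expected to associate the OVN LB with that router, hence
# the assertion that follows.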
self.helper._update_lb_to_lr_association.assert_called_once_with( self.ovn_lb, self.router) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_lb_add_from_lr_no_ls(self, net_cli, f_lr, f_ls): fake_subnet = fakes.FakeSubnet.create_one_subnet() net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet} self.ovn_lb.external_ids = mock.MagicMock() (self.helper.ovn_nbdb_api.ls_get.return_value. execute.side_effect) = [n_exc.NotFound] status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) (self.helper.ovn_nbdb_api.ls_get.return_value.execute. assert_called_once_with(check_error=True)) f_lr.assert_not_called() f_ls.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_add_member') def test_member_create_exception(self, mock_add_member): mock_add_member.side_effect = [RuntimeError] status = self.helper.member_create(self.member) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def test_member_create_lb_disabled(self): self.helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.helper.member_create(self.member) self.helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_listener(self, net_cli): net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound] self.ovn_lb.external_ids = mock.MagicMock() self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.member_create(self.member) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['id'], 'listener1') def test_member_create_already_exists(self): self.helper.member_create(self.member) self.helper.ovn_nbdb_api.db_set.assert_not_called() def test_member_create_first_member_in_pool(self): self.ovn_lb.external_ids.update({ 'pool_' + self.pool_id: ''}) self.helper.member_create(self.member) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_member_create_second_member_in_pool(self): member2_id = uuidutils.generate_uuid() member2_subnet_id = uuidutils.generate_uuid() member2_port = '1010' member2_address = '192.168.2.150' member2_line = ('member_%s_%s:%s_%s' % (member2_id, member2_address, member2_port, member2_subnet_id)) self.ovn_lb.external_ids.update( {'pool_%s' % self.pool_id: member2_line}) self.helper.member_create(self.member) all_member_line = ( '%s,member_%s_%s:%s_%s' % (member2_line, self.member_id, self.member_address, self.member_port, self.member_subnet_id)) # We have two members now. 
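# With two members the vips value becomes a comma-separated list of
# 'ip:port' backends behind each VIP key, e.g.
# '192.168.2.150:1010,192.168.2.149:1010'.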
expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'pool_%s' % self.pool_id: all_member_line})), mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.150:1010,192.168.2.149:1010', '123.123.123.123:80': '192.168.2.150:1010,192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_member_update(self): self.ovn_lb.external_ids = mock.MagicMock() status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.member['admin_state_up'] = False status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.OFFLINE) self.member['old_admin_state_up'] = False self.member['admin_state_up'] = True status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.NO_MONITOR) def test_member_update_disabled_lb(self): self.helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.helper.member_update(self.member) self.helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) def test_member_update_pool_listeners(self): self.ovn_lb.external_ids = mock.MagicMock() self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.member_update(self.member) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['id'], 'listener1') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_member') def test_member_update_exception(self, mock_update_member): mock_update_member.side_effect = [RuntimeError] status = self.helper.member_update(self.member) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) def test_member_update_new_member_line(self): old_member_line = ( 'member_%s_%s:%s' % (self.member_id, self.member_address, self.member_port)) new_member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids.update( {'pool_%s' % self.pool_id: old_member_line}) self.helper.member_update(self.member) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'pool_%s' % self.pool_id: new_member_line}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_member_update_new_port(self): new_port = 11 member_line = ('member_%s_%s:%s_%s' % (self.member_id, self.member_address, new_port, self.member_subnet_id)) self.ovn_lb.external_ids.update( {'pool_%s' % self.pool_id: member_line}) self.helper.member_update(self.member) new_member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, 
self.member_port, self.member_subnet_id)) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'pool_%s' % self.pool_id: new_member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_refresh_lb_vips') def test_member_delete(self, mock_vip_command): status = self.helper.member_delete(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) def test_member_delete_one_left(self): member2_id = uuidutils.generate_uuid() member2_port = '1010' member2_address = '192.168.2.150' member2_subnet_id = uuidutils.generate_uuid() member_line = ( 'member_%s_%s:%s_%s,member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id, member2_id, member2_address, member2_port, member2_subnet_id)) self.ovn_lb.external_ids.update({ 'pool_' + self.pool_id: member_line}) status = self.helper.member_delete(self.member) self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member') def test_member_delete_exception(self, mock_remove_member): mock_remove_member.side_effect = [RuntimeError] status = self.helper.member_delete(self.member) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) def test_member_delete_disabled_lb(self): self.helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.helper.member_delete(self.member) self.helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) def test_member_delete_pool_listeners(self): member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids.update({ 'pool_' + self.pool_id: member_line}) self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.member_delete(self.member) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['id'], 'listener1') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_router_port_event_create(self, net_cli): self.router_port_event = ovn_event.LogicalRouterPortEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'gateway_chassis': []}) self.router_port_event.run('create', row, mock.ANY) expected = { 'info': {'router': self.router, 'network': self.network}, 'type': 'lb_create_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_router_port_event_delete(self, net_cli): self.router_port_event = ovn_event.LogicalRouterPortEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'gateway_chassis': []}) self.router_port_event.run('delete', row, mock.ANY) expected = { 'info': {'router': self.router, 'network': self.network}, 'type': 'lb_delete_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) 
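# Gateway ports (rows with a non-empty gateway_chassis) are
# expected to be ignored by LogicalRouterPortEvent, as the next
# test verifies.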
    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_logical_router_port_event_gw_port(self, net_cli):
        self.router_port_event = ovn_event.LogicalRouterPortEvent(
            self.helper)
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'gateway_chassis': ['temp-gateway-chassis']})
        self.router_port_event.run(mock.ANY, row, mock.ANY)
        self.mock_add_request.assert_not_called()

    def test__get_nw_router_info_on_interface_event(self):
        self.mock_get_nw.stop()
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1',
                    ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'network1'}
            })
        self.helper._get_nw_router_info_on_interface_event(lrp)
        expected_calls = [
            mock.call.lookup('Logical_Router', 'neutron-router1'),
            mock.call.lookup('Logical_Switch', 'network1')]
        self.helper.ovn_nbdb_api.assert_has_calls(expected_calls)

    def test__get_nw_router_info_on_interface_event_not_found(self):
        self.mock_get_nw.stop()
        self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound]
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'}
            })
        self.assertRaises(
            idlutils.RowNotFound,
            self.helper._get_nw_router_info_on_interface_event,
            lrp)

    def test_lb_delete_lrp_assoc_handler(self):
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row()
        self.helper.lb_delete_lrp_assoc_handler(lrp)
        expected = {
            'info': {'router': self.router,
                     'network': self.network},
            'type': 'lb_delete_lrp_assoc'}
        self.mock_add_request.assert_called_once_with(expected)

    def test_lb_delete_lrp_assoc_handler_info_not_found(self):
        self.mock_get_nw.stop()
        self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound]
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'}
            })
        self.helper.lb_delete_lrp_assoc_handler(lrp)
        self.mock_add_request.assert_not_called()

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
    def test_lb_delete_lrp_assoc_no_net_lb_no_r_lb(self, mock_execute):
        info = {
            'network': self.network,
            'router': self.router,
        }
        self.network.load_balancer = []
        self.router.load_balancer = []
        self.helper.lb_delete_lrp_assoc(info)
        self.helper._update_lb_to_lr_association.assert_not_called()
        mock_execute.assert_not_called()

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
    def test_lb_delete_lrp_assoc_no_net_lb_r_lb(self, mock_execute):
        info = {
            'network': self.network,
            'router': self.router,
        }
        self.network.load_balancer = []
        self.helper.lb_delete_lrp_assoc(info)
        expected = [
            self.helper.ovn_nbdb_api.ls_lb_del(
                self.network.uuid, self.router.load_balancer[0].uuid
            ),
        ]
        self.helper._update_lb_to_lr_association.assert_not_called()
        mock_execute.assert_called_once_with(expected)

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
    def test_lb_delete_lrp_assoc_net_lb_no_r_lb(self, mock_execute):
        info = {
            'network': self.network,
            'router': self.router,
        }
        self.router.load_balancer = []
        self.helper.lb_delete_lrp_assoc(info)
        mock_execute.assert_not_called()
        self.helper._update_lb_to_lr_association.assert_called_once_with(
            self.network.load_balancer[0], self.router, delete=True
        )
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
    def test_lb_delete_lrp_assoc(self, mock_execute):
        info = {
            'network': self.network,
            'router': self.router,
        }
        self.helper.lb_delete_lrp_assoc(info)
        self.helper._update_lb_to_lr_association.assert_called_once_with(
            self.network.load_balancer[0], self.router, delete=True
        )
        expected = [
            self.helper.ovn_nbdb_api.ls_lb_del(
                self.network.uuid, self.router.load_balancer[0].uuid
            ),
        ]
        mock_execute.assert_called_once_with(expected)

    def test_lb_create_lrp_assoc_handler(self):
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row()
        self.helper.lb_create_lrp_assoc_handler(lrp)
        expected = {
            'info': {'router': self.router,
                     'network': self.network},
            'type': 'lb_create_lrp_assoc'}
        self.mock_add_request.assert_called_once_with(expected)

    def test_lb_create_lrp_assoc_handler_row_not_found(self):
        self.mock_get_nw.stop()
        self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound]
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'}
            })
        self.helper.lb_create_lrp_assoc_handler(lrp)
        self.mock_add_request.assert_not_called()

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
    def test_lb_create_lrp_assoc(self, mock_execute):
        info = {
            'network': self.network,
            'router': self.router,
        }
        self.helper.lb_create_lrp_assoc(info)
        self.helper._update_lb_to_lr_association.assert_called_once_with(
            self.network.load_balancer[0], self.router
        )
        expected = [
            self.helper.ovn_nbdb_api.ls_lb_add(
                self.network.uuid, self.router.load_balancer[0].uuid
            ),
        ]
        mock_execute.assert_called_once_with(expected)

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
    def test_lb_create_lrp_assoc_uniq_lb(self, mock_execute):
        info = {
            'network': self.network,
            'router': self.router,
        }
        # Make it already uniq.
        self.network.load_balancer = self.router.load_balancer
        self.helper.lb_create_lrp_assoc(info)
        self.helper._update_lb_to_lr_association.assert_not_called()
        mock_execute.assert_not_called()

    def test__find_lb_in_ls(self):
        net_lb = self.helper._find_lb_in_ls(self.network)
        for lb in self.network.load_balancer:
            self.assertIn(lb, net_lb)

    def test__find_lb_in_ls_wrong_ref(self):
        # Let's break the external_ids refs.
        self.network.load_balancer[0].external_ids.update({
            ovn_const.LB_EXT_IDS_LS_REFS_KEY: 'foo'})
        net_lb = self.helper._find_lb_in_ls(self.network)
        for lb in self.network.load_balancer:
            self.assertNotIn(lb, net_lb)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__find_ls_for_lr(self, net_cli):
        fake_subnet1 = fakes.FakeSubnet.create_one_subnet()
        fake_subnet1['network_id'] = 'foo1'
        fake_subnet2 = fakes.FakeSubnet.create_one_subnet()
        fake_subnet2['network_id'] = 'foo2'
        net_cli.return_value.show_subnet.side_effect = [
            {'subnet': fake_subnet1},
            {'subnet': fake_subnet2}]
        p1 = fakes.FakeOVNPort.create_one_port(attrs={
            'gateway_chassis': [],
            'external_ids': {
                ovn_const.OVN_SUBNET_EXT_IDS_KEY:
                    '%s %s' % (fake_subnet1.id, fake_subnet2.id)}})
        self.router.ports.append(p1)
        res = self.helper._find_ls_for_lr(self.router)
        self.assertListEqual(['neutron-foo1', 'neutron-foo2'], res)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__find_ls_for_lr_subnet_not_found(self, net_cli):
        fake_subnet1 = fakes.FakeSubnet.create_one_subnet()
        fake_subnet1['network_id'] = 'foo1'
        fake_subnet2 = fakes.FakeSubnet.create_one_subnet()
        fake_subnet2['network_id'] = 'foo2'
        net_cli.return_value.show_subnet.side_effect = [
            {'subnet': fake_subnet1},
            n_exc.NotFound]
        p1 = fakes.FakeOVNPort.create_one_port(attrs={
            'gateway_chassis': [],
            'external_ids': {
                ovn_const.OVN_SUBNET_EXT_IDS_KEY:
                    '%s %s' % (fake_subnet1.id, fake_subnet2.id)}})
        self.router.ports.append(p1)
        res = self.helper._find_ls_for_lr(self.router)
        self.assertListEqual(['neutron-foo1'], res)
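    # NOTE: _find_ls_for_lr maps every subnet id stored in a router
    # port's external_ids to its Neutron network and returns the matching
    # Logical_Switch names ('neutron-<network_id>').  Subnets that
    # Neutron no longer knows about are skipped, as the test above
    # verifies, and gateway ports are ignored entirely (see
    # test__find_ls_for_lr_gw_port below).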
    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__find_ls_for_lr_gw_port(self, net_cli):
        p1 = fakes.FakeOVNPort.create_one_port(attrs={
            'gateway_chassis': ['foo-gw-chassis'],
            'external_ids': {
                ovn_const.OVN_SUBNET_EXT_IDS_KEY: self.member_subnet_id}})
        self.router.ports.append(p1)
        result = self.helper._find_ls_for_lr(self.router)
        self.assertListEqual([], result)

    @mock.patch.object(
        ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association')
    @mock.patch.object(
        ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association')
    def test__update_lb_to_lr_association(self, add, delete):
        self._update_lb_to_lr_association.stop()
        self.helper._update_lb_to_lr_association(self.ref_lb1, self.router)
        lr_ref = self.ref_lb1.external_ids.get(
            ovn_const.LB_EXT_IDS_LR_REF_KEY)
        add.assert_called_once_with(self.ref_lb1, self.router, lr_ref)
        delete.assert_not_called()

    @mock.patch.object(
        ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association')
    @mock.patch.object(
        ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association')
    def test__update_lb_to_lr_association_delete(self, add, delete):
        self._update_lb_to_lr_association.stop()
        self.helper._update_lb_to_lr_association(
            self.ref_lb1, self.router, delete=True)
        lr_ref = self.ref_lb1.external_ids.get(
            ovn_const.LB_EXT_IDS_LR_REF_KEY)
        add.assert_not_called()
        delete.assert_called_once_with(self.ref_lb1, self.router, lr_ref)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__del_lb_to_lr_association(self, net_cli):
        lr_ref = self.ref_lb1.external_ids.get(
            ovn_const.LB_EXT_IDS_LR_REF_KEY)
        upd_lr_ref = '%s,%s' % (lr_ref, self.router.name)
        self.helper._del_lb_to_lr_association(
            self.ref_lb1, self.router, upd_lr_ref)
        expected_calls = [
            mock.call.db_set(
                'Load_Balancer', self.ref_lb1.uuid,
                (('external_ids',
                  {ovn_const.LB_EXT_IDS_LR_REF_KEY: lr_ref}))),
            mock.call.lr_lb_del(
                self.router.uuid, self.ref_lb1.uuid,
                if_exists=True)]
        self.helper.ovn_nbdb_api.assert_has_calls(
            expected_calls)
        self.helper.ovn_nbdb_api.db_remove.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__del_lb_to_lr_association_no_lr_ref(self, net_cli):
        lr_ref = ''
        self.helper._del_lb_to_lr_association(
            self.ref_lb1, self.router, lr_ref)
        self.helper.ovn_nbdb_api.db_set.assert_not_called()
        self.helper.ovn_nbdb_api.db_remove.assert_not_called()
        self.helper.ovn_nbdb_api.lr_lb_del.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__del_lb_to_lr_association_lr_ref_empty_after(self, net_cli):
        lr_ref = self.router.name
        self.helper._del_lb_to_lr_association(
            self.ref_lb1, self.router, lr_ref)
        self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid, 'external_ids',
            ovn_const.LB_EXT_IDS_LR_REF_KEY)
        self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with(
            self.router.uuid, self.ref_lb1.uuid, if_exists=True)
        self.helper.ovn_nbdb_api.db_set.assert_not_called()
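    # NOTE: the router associations of a load balancer are mirrored in a
    # comma-separated 'lr_ref' entry of its external_ids; the helpers
    # above append or strip router names from that list and drop the key
    # entirely once the last reference is gone.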
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
    def test__del_lb_to_lr_association_from_ls(self, f_ls):
        # This tests that the LB is also deleted from the Logical_Switches
        # associated with the Logical_Router.
        f_ls.return_value = ['neutron-xyz', 'neutron-qwr']
        self.helper._del_lb_to_lr_association(self.ref_lb1, self.router, '')
        self.helper.ovn_nbdb_api.ls_lb_del.assert_has_calls([
            (mock.call('neutron-xyz', self.ref_lb1.uuid, if_exists=True)),
            (mock.call('neutron-qwr', self.ref_lb1.uuid, if_exists=True))])

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
    def test__add_lb_to_lr_association(self, f_ls):
        lr_ref = 'foo'
        f_ls.return_value = ['neutron-xyz', 'neutron-qwr']
        self.helper._add_lb_to_lr_association(
            self.ref_lb1, self.router, lr_ref)
        self.helper.ovn_nbdb_api.lr_lb_add.assert_called_once_with(
            self.router.uuid, self.ref_lb1.uuid, may_exist=True)
        self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([
            (mock.call('neutron-xyz', self.ref_lb1.uuid, may_exist=True)),
            (mock.call('neutron-qwr', self.ref_lb1.uuid, may_exist=True))])
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', {'lr_ref': 'foo,%s' % self.router.name}))

    def test__find_lr_of_ls(self):
        lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1',
                    'neutron:cidrs': '10.10.10.1/24',
                    ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
                        n_const.DEVICE_OWNER_ROUTER_INTF},
                'type': 'router',
                'options': {
                    'router-port': 'lrp-foo-name'},
            })
        lsp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router2',
                    'neutron:cidrs': '10.10.10.2/24',
                    ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
                        n_const.DEVICE_OWNER_ROUTER_INTF},
                'type': 'router',
                'options': {
                    'router-port': 'lrp-bar-name'},
            })
        lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'name': 'lrp-foo-name',
            })
        lr = fakes.FakeOVNRouter.create_one_router(
            attrs={
                'name': 'router1',
                'ports': [lrp]})
        ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ports': [lsp2, lsp]})
        (self.helper.ovn_nbdb_api.get_lrs.return_value.
            execute.return_value) = [lr]
        returned_lr = self.helper._find_lr_of_ls(ls, '10.10.10.1')
        self.assertEqual(lr, returned_lr)

    def test__find_lr_of_ls_gw_port_id(self):
        lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1',
                    ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
                        n_const.DEVICE_OWNER_ROUTER_INTF},
                'type': 'router',
                'options': {
                    'router-port': 'lrp-lrp-foo-name'}
            })
        lr = fakes.FakeOVNRouter.create_one_router(
            attrs={
                'name': 'router1',
                'ports': [],
                'external_ids': {
                    'neutron:gw_port_id': 'lrp-foo-name'}})
        ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ports': [lsp]})
        (self.helper.ovn_nbdb_api.get_lrs.return_value.
            execute.return_value) = [lr]
        returned_lr = self.helper._find_lr_of_ls(ls)
        self.assertEqual(lr, returned_lr)

    def test__find_lr_of_ls_no_lrp_name(self):
        lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'},
                'type': 'router',
                'options': {
                    'router-port': None}
            })
        ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ports': [lsp]})
        returned_lr = self.helper._find_lr_of_ls(ls)
        self.assertIsNone(returned_lr)

    def test__find_lr_of_ls_no_lrp(self):
        ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ports': []})
        returned_lr = self.helper._find_lr_of_ls(ls)
        (self.helper.ovn_nbdb_api.tables['Logical_Router'].rows.
            values.assert_not_called())
        self.assertIsNone(returned_lr)
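    # NOTE: _find_lr_of_ls walks the switch ports of type 'router',
    # resolves the peer Logical_Router_Port from the 'router-port'
    # option and returns the Logical_Router owning it, optionally
    # matched against the subnet gateway IP found in 'neutron:cidrs'
    # (the '10.10.10.1' argument in test__find_lr_of_ls above).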
    def test__update_lb_to_ls_association_empty_network_and_subnet(self):
        self._update_lb_to_ls_association.stop()
        returned_commands = self.helper._update_lb_to_ls_association(
            self.ref_lb1, associate=True)
        self.assertListEqual(returned_commands, [])

    def test__update_lb_to_ls_association_network(self):
        self._update_lb_to_ls_association.stop()
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=True)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        ls_refs = {'ls_refs': '{"%s": 2}' % self.network.name}
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs))

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__update_lb_to_ls_association_subnet(self, net_cli):
        self._update_lb_to_ls_association.stop()
        subnet = fakes.FakeSubnet.create_one_subnet(
            attrs={'id': 'foo_subnet_id',
                   'name': 'foo_subnet_name',
                   'network_id': 'foo_network_id'})
        net_cli.return_value.show_subnet.return_value = {
            'subnet': subnet}
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, subnet_id=subnet.id, associate=True)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            'neutron-foo_network_id')

    def test__update_lb_to_ls_association_empty_ls_refs(self):
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            return_value) = self.network
        self.ref_lb1.external_ids.pop('ls_refs')
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid)
        self.helper.ovn_nbdb_api.ls_lb_add.assert_called_once_with(
            self.network.uuid, self.ref_lb1.uuid, may_exist=True)
        ls_refs = {'ls_refs': '{"%s": 1}' % self.network.name}
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs))

    def test__update_lb_to_ls_association_no_ls(self):
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            side_effect) = [idlutils.RowNotFound]
        returned_commands = self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        self.assertListEqual([], returned_commands)

    def test__update_lb_to_ls_association_network_disassociate(self):
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            return_value) = self.network
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=False)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', {'ls_refs': '{}'}))
        self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with(
            self.network.uuid, self.ref_lb1.uuid, if_exists=True)
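    # NOTE: switch associations are reference-counted in a JSON-encoded
    # 'ls_refs' entry of the LB external_ids, e.g. '{"neutron-net1": 2}';
    # associate bumps the counter and disassociate decrements it, only
    # issuing ls_lb_del once the count for that switch is exhausted (see
    # the multiple-refs test below).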
    def test__update_lb_to_ls_association_network_dis_ls_not_found(self):
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            side_effect) = [idlutils.RowNotFound]
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=False)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', {'ls_refs': '{}'}))
        self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test__update_lb_to_ls_association_network_dis_net_not_found(
            self, net_cli):
        net_cli.return_value.show_subnet.side_effect = n_exc.NotFound
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            return_value) = self.network
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, subnet_id='foo', associate=False)
        self.helper.ovn_nbdb_api.ls_get.assert_not_called()
        self.helper.ovn_nbdb_api.db_set.assert_not_called()
        self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called()

    def test__update_lb_to_ls_association_disassoc_ls_not_in_ls_refs(self):
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            return_value) = self.network
        self.ref_lb1.external_ids.pop('ls_refs')
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=False)
        self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called()
        self.helper.ovn_nbdb_api.db_set.assert_not_called()

    def test__update_lb_to_ls_association_disassoc_multiple_refs(self):
        self._update_lb_to_ls_association.stop()
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
            return_value) = self.network
        # Multiple refs.
        ls_refs = {'ls_refs': '{"%s": 2}' % self.network.name}
        self.ref_lb1.external_ids.update(ls_refs)
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=False)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        exp_ls_refs = {'ls_refs': '{"%s": 1}' % self.network.name}
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', exp_ls_refs))

    def test_logical_switch_port_update_event_vip_port(self):
        self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent(
            self.helper)
        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
        attrs = {
            'external_ids':
                {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name,
                 ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '10.0.0.1'}}
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs=attrs)
        self.switch_port_event.run(mock.ANY, row, mock.ANY)
        expected_call = {
            'info':
                {'action': 'associate',
                 'vip_fip': '10.0.0.1',
                 'ovn_lb': self.ovn_lb},
            'type': 'handle_vip_fip'}
        self.mock_add_request.assert_called_once_with(expected_call)

    def test_logical_switch_port_update_event_missing_port_name(self):
        self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent(
            self.helper)
        attrs = {'external_ids': {}}
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs=attrs)
        self.switch_port_event.run(mock.ANY, row, mock.ANY)
        self.mock_add_request.assert_not_called()

    def test_logical_switch_port_update_event_empty_fip(self):
        self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent(
            self.helper)
        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
        attrs = {'external_ids':
                 {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs=attrs)
        self.switch_port_event.run(mock.ANY, row, mock.ANY)
        expected_call = {
            'info':
                {'action': 'disassociate',
                 'vip_fip': None,
                 'ovn_lb': self.ovn_lb},
            'type': 'handle_vip_fip'}
        self.mock_add_request.assert_called_once_with(expected_call)
    def test_logical_switch_port_update_event_not_vip_port(self):
        self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent(
            self.helper)
        port_name = 'foo'
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'external_ids':
                   {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}})
        self.switch_port_event.run(mock.ANY, row, mock.ANY)
        self.mock_add_request.assert_not_called()

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_vip_port_update_handler_lb_not_found(self, lb):
        lb.side_effect = [idlutils.RowNotFound for _ in range(5)]
        self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent(
            self.helper)
        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
        attrs = {'external_ids':
                 {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs=attrs)
        self.switch_port_event.run(mock.ANY, row, mock.ANY)
        self.mock_add_request.assert_not_called()

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_vip_port_update_handler_multiple_lbs(self, lb):
        lb1 = mock.MagicMock()
        lb2 = mock.MagicMock()
        lb.return_value = [lb1, lb2]
        self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent(
            self.helper)
        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
        attrs = {'external_ids':
                 {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs=attrs)
        self.switch_port_event.run(mock.ANY, row, mock.ANY)

        def expected_call(lb):
            return {'type': 'handle_vip_fip',
                    'info': {'action': mock.ANY,
                             'vip_fip': None,
                             'ovn_lb': lb}}

        self.mock_add_request.assert_has_calls([
            mock.call(expected_call(lb1)),
            mock.call(expected_call(lb2))])

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_handle_vip_fip_disassociate(self, flb):
        lb = mock.MagicMock()
        fip_info = {
            'action': 'disassociate',
            'vip_fip': None,
            'ovn_lb': lb}
        flb.return_value = lb
        self.helper.handle_vip_fip(fip_info)
        calls = [
            mock.call.db_remove(
                'Load_Balancer', lb.uuid, 'external_ids',
                'neutron:vip_fip'),
            mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'),
            mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))]
        self.helper.ovn_nbdb_api.assert_has_calls(calls)
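    # NOTE: on 'disassociate' the helper drops the 'neutron:vip_fip'
    # external_ids entry and rebuilds the vips map without the FIP; on
    # 'associate' (next test) it records the FIP and writes a second
    # vips entry keyed on '<fip>:<port>' alongside the internal VIP one.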
    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_handle_vip_fip_associate(self, fb):
        lb = mock.MagicMock()
        fip_info = {
            'action': 'associate',
            'vip_fip': '10.0.0.123',
            'ovn_lb': lb}
        members = 'member_%s_%s:%s_%s' % (self.member_id,
                                          self.member_address,
                                          self.member_port,
                                          self.member_subnet_id)
        external_ids = {
            'listener_foo': '80:pool_%s' % self.pool_id,
            'pool_%s' % self.pool_id: members,
            'neutron:vip': '172.26.21.20'}
        lb.external_ids = external_ids
        fb.return_value = lb
        self.helper.handle_vip_fip(fip_info)
        calls = [
            mock.call.db_set(
                'Load_Balancer', lb.uuid,
                ('external_ids', {'neutron:vip_fip': '10.0.0.123'})),
            mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'),
            mock.call.db_set(
                'Load_Balancer', lb.uuid,
                ('vips', {'10.0.0.123:80': '192.168.2.149:1010',
                          '172.26.21.20:80': '192.168.2.149:1010'}))]
        self.helper.ovn_nbdb_api.assert_has_calls(calls)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_handle_member_dvr_lb_has_no_fip(self, net_cli):
        lb = mock.MagicMock()
        info = {
            'id': self.member_id,
            'pool_id': self.pool_id,
            'action': ovn_const.REQ_INFO_MEMBER_ADDED}
        external_ids = {
            'neutron:vip_fip': ''}
        lb.external_ids = external_ids
        self.mock_find_lb_pool_key.return_value = lb
        self.helper.handle_member_dvr(info)
        net_cli.show_subnet.assert_not_called()
        self.helper.ovn_nbdb_api.db_clear.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_handle_member_dvr_lb_fip_no_ls_ports(self, net_cli):
        lb = mock.MagicMock()
        info = {
            'id': self.member_id,
            'subnet_id': self.member_subnet_id,
            'pool_id': self.pool_id,
            'action': ovn_const.REQ_INFO_MEMBER_ADDED}
        external_ids = {
            'neutron:vip_fip': '11.11.11.11'}
        lb.external_ids = external_ids
        self.mock_find_lb_pool_key.return_value = lb
        fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {},
                'ports': {}})
        self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
        self.helper.handle_member_dvr(info)
        self.helper.ovn_nbdb_api.db_clear.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_handle_member_dvr_lb_fip_no_subnet(self, net_cli):
        lb = mock.MagicMock()
        info = {
            'id': self.member_id,
            'subnet_id': self.member_subnet_id,
            'pool_id': self.pool_id,
            'action': ovn_const.REQ_INFO_MEMBER_ADDED}
        external_ids = {
            'neutron:vip_fip': '11.11.11.11'}
        lb.external_ids = external_ids
        self.mock_find_lb_pool_key.return_value = lb
        net_cli.return_value.show_subnet.side_effect = [n_exc.NotFound]
        self.helper.handle_member_dvr(info)
        self.helper.ovn_nbdb_api.db_clear.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_handle_member_dvr_lb_fip_no_ls(self, net_cli):
        lb = mock.MagicMock()
        info = {
            'id': self.member_id,
            'subnet_id': self.member_subnet_id,
            'pool_id': self.pool_id,
            'action': ovn_const.REQ_INFO_MEMBER_ADDED}
        external_ids = {
            'neutron:vip_fip': '11.11.11.11'}
        lb.external_ids = external_ids
        self.mock_find_lb_pool_key.return_value = lb
        self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound]
        self.helper.handle_member_dvr(info)
        self.helper.ovn_nbdb_api.db_clear.assert_not_called()
    def _test_handle_member_dvr_lb_fip(
            self, net_cli, action=ovn_const.REQ_INFO_MEMBER_ADDED):
        lb = mock.MagicMock()
        fake_port = fakes.FakePort.create_one_port(
            attrs={'allowed_address_pairs': ''})
        info = {
            'id': self.member_id,
            'address': fake_port['fixed_ips'][0]['ip_address'],
            'pool_id': self.pool_id,
            'subnet_id': fake_port['fixed_ips'][0]['subnet_id'],
            'action': action}
        member_subnet = fakes.FakeSubnet.create_one_subnet()
        member_subnet['id'] = self.member_subnet_id
        member_subnet['network_id'] = 'foo'
        net_cli.return_value.show_subnet.return_value = {
            'subnet': member_subnet}
        fake_lsp = fakes.FakeOVNPort.from_neutron_port(
            fake_port)
        fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {},
                'name': 'foo',
                'ports': [fake_lsp]})
        self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
        fake_nat = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ip': '22.22.22.22',
                'external_ids': {
                    ovn_const.OVN_FIP_EXT_ID_KEY: 'fip_id'}})
        fip_info = {
            'floatingip': {
                'description': 'bar'}}
        net_cli.return_value.show_floatingip.return_value = fip_info
        self.helper.ovn_nbdb_api.db_find_rows.return_value.\
            execute.return_value = [fake_nat]
        external_ids = {
            ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '11.11.11.11'}
        lb.external_ids = external_ids
        self.mock_find_lb_pool_key.return_value = lb
        self.helper.handle_member_dvr(info)

        if action == ovn_const.REQ_INFO_MEMBER_ADDED:
            calls = [
                mock.call.lookup('Logical_Switch', 'neutron-foo'),
                mock.call.db_find_rows('NAT', ('external_ids', '=', {
                    ovn_const.OVN_FIP_PORT_EXT_ID_KEY: fake_lsp.name})),
                mock.ANY,
                mock.call.db_clear('NAT', fake_nat.uuid, 'external_mac'),
                mock.ANY,
                mock.call.db_clear('NAT', fake_nat.uuid, 'logical_port'),
                mock.ANY]
            self.helper.ovn_nbdb_api.assert_has_calls(calls)
        else:
            (net_cli.return_value.show_floatingip.
                assert_called_once_with('fip_id'))
            (net_cli.return_value.update_floatingip.
                assert_called_once_with('fip_id', {
                    'floatingip': {'description': 'bar'}}))
            self.helper.ovn_nbdb_api.db_clear.assert_not_called()

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_handle_member_dvr_lb_fip_member_added(self, net_cli):
        self._test_handle_member_dvr_lb_fip(net_cli)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_handle_member_dvr_lb_fip_member_deleted(self, net_cli):
        self._test_handle_member_dvr_lb_fip(
            net_cli, action=ovn_const.REQ_INFO_MEMBER_DELETED)

    def test_ovsdb_connections(self):
        ovn_helper.OvnProviderHelper.ovn_nbdb_api = None
        ovn_helper.OvnProviderHelper.ovn_nbdb_api_for_events = None
        prov_helper1 = ovn_helper.OvnProviderHelper()
        prov_helper2 = ovn_helper.OvnProviderHelper()
        # One connection for API requests.
        self.assertIs(prov_helper1.ovn_nbdb_api,
                      prov_helper2.ovn_nbdb_api)
        # One connection to handle events.
        self.assertIs(prov_helper1.ovn_nbdb_api_for_events,
                      prov_helper2.ovn_nbdb_api_for_events)
        prov_helper2.shutdown()
        prov_helper1.shutdown()

    def test_create_vip_port_vip_selected(self):
        expected_dict = {
            'port': {'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
                                       self.loadbalancer_id),
                     'fixed_ips': [{'subnet_id':
                                    self.vip_dict['vip_subnet_id'],
                                    'ip_address': '10.1.10.1'}],
                     'network_id': self.vip_dict['vip_network_id'],
                     'admin_state_up': True,
                     'project_id': self.project_id}}
        with mock.patch.object(clients, 'get_neutron_client') as net_cli:
            self.vip_dict['vip_address'] = '10.1.10.1'
            self.helper.create_vip_port(self.project_id,
                                        self.loadbalancer_id,
                                        self.vip_dict)
            expected_call = [
                mock.call().create_port(expected_dict)]
            net_cli.assert_has_calls(expected_call)
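    # NOTE: VIP ports created by the provider are named with the
    # ovn_const.LB_VIP_PORT_PREFIX ('ovn-lb-vip-') followed by the load
    # balancer id, which is also how an already-allocated port is looked
    # up again in test_create_vip_port_vip_selected_already_exist below.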
    def test_create_vip_port_vip_not_selected(self):
        expected_dict = {
            'port': {'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
                                       self.loadbalancer_id),
                     'fixed_ips': [{'subnet_id':
                                    self.vip_dict['vip_subnet_id']}],
                     'network_id': self.vip_dict['vip_network_id'],
                     'admin_state_up': True,
                     'project_id': self.project_id}}
        with mock.patch.object(clients, 'get_neutron_client') as net_cli:
            self.helper.create_vip_port(self.project_id,
                                        self.loadbalancer_id,
                                        self.vip_dict)
            expected_call = [
                mock.call().create_port(expected_dict)]
            net_cli.assert_has_calls(expected_call)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_create_vip_port_vip_selected_already_exist(self, net_cli):
        net_cli.return_value.create_port.side_effect = [
            n_exc.IpAddressAlreadyAllocatedClient]
        net_cli.return_value.list_ports.return_value = {
            'ports': [
                {'name': 'ovn-lb-vip-' + self.loadbalancer_id,
                 'id': self.loadbalancer_id}]}
        self.vip_dict['vip_address'] = '10.1.10.1'
        ret = self.helper.create_vip_port(
            self.project_id,
            self.loadbalancer_id,
            self.vip_dict)
        expected = {
            'port': {
                'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
                                  self.loadbalancer_id),
                'id': self.loadbalancer_id}}
        self.assertDictEqual(expected, ret)
        expected_call = [
            mock.call().list_ports(
                network_id='%s' % self.vip_dict['vip_network_id'],
                name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
                               self.loadbalancer_id))]
        net_cli.assert_has_calls(expected_call)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    def test_create_vip_port_vip_selected_other_allocation_exist(
            self, net_cli):
        net_cli.return_value.create_port.side_effect = [
            n_exc.IpAddressAlreadyAllocatedClient]
        net_cli.return_value.list_ports.return_value = {
            'ports': []}
        self.vip_dict['vip_address'] = '10.1.10.1'
        self.assertRaises(
            n_exc.IpAddressAlreadyAllocatedClient,
            self.helper.create_vip_port,
            self.project_id,
            self.loadbalancer_id,
            self.vip_dict)
        expected_call = [
            mock.call().list_ports(
                network_id='%s' % self.vip_dict['vip_network_id'],
                name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
                               self.loadbalancer_id))]
        net_cli.assert_has_calls(expected_call)
        self.helper._update_status_to_octavia.assert_not_called()

    def test_get_pool_member_id(self):
        ret = self.helper.get_pool_member_id(
            self.pool_id, mem_addr_port='192.168.2.149:1010')
        self.assertEqual(self.member_id, ret)

    def test__get_existing_pool_members(self):
        ret = self.helper._get_existing_pool_members(self.pool_id)
        self.assertEqual(ret, self.member_line)
    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lb_by_pool_id')
    def test__get_existing_pool_members_exception(self, folbpi):
        folbpi.return_value = (None, None)
        self.assertRaises(exceptions.DriverError,
                          self.helper._get_existing_pool_members,
                          self.pool_id)

    def test__frame_lb_vips(self):
        ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
        expected = {'10.22.33.4:80': '192.168.2.149:1010',
                    '123.123.123.123:80': '192.168.2.149:1010'}
        self.assertEqual(expected, ret)

    def test__frame_lb_vips_disabled(self):
        self.ovn_lb.external_ids['enabled'] = 'False'
        ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
        self.assertEqual({}, ret)

    def test__frame_lb_vips_ipv6(self):
        self.member_address = '2001:db8::1'
        self.member_line = (
            'member_%s_%s:%s_%s' % (self.member_id,
                                    self.member_address,
                                    self.member_port,
                                    self.member_subnet_id))
        self.ovn_lb.external_ids = {
            ovn_const.LB_EXT_IDS_VIP_KEY: 'fc00::',
            ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '2002::',
            'pool_%s' % self.pool_id: self.member_line,
            'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
        ret = self.helper._frame_vip_ips(self.ovn_lb.external_ids)
        expected = {'[2002::]:80': '[2001:db8::1]:1010',
                    '[fc00::]:80': '[2001:db8::1]:1010'}
        self.assertEqual(expected, ret)

    def test_check_lb_protocol(self):
        self.ovn_lb.protocol = ['tcp']
        ret = self.helper.check_lb_protocol(self.listener_id, 'udp')
        self.assertFalse(ret)
        ret = self.helper.check_lb_protocol(self.listener_id, 'UDP')
        self.assertFalse(ret)
        ret = self.helper.check_lb_protocol(self.listener_id, 'sctp')
        self.assertFalse(ret)
        ret = self.helper.check_lb_protocol(self.listener_id, 'SCTP')
        self.assertFalse(ret)
        ret = self.helper.check_lb_protocol(self.listener_id, 'tcp')
        self.assertTrue(ret)
        ret = self.helper.check_lb_protocol(self.listener_id, 'TCP')
        self.assertTrue(ret)

    def test_check_lb_protocol_no_listener(self):
        self.ovn_lb.external_ids = []
        ret = self.helper.check_lb_protocol(self.listener_id, 'TCP')
        self.assertTrue(ret)
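    # NOTE: _frame_vip_ips renders the OVN vips column from external_ids
    # as '<vip>:<listener_port>' -> '<member_ip>:<member_port>[,...]',
    # wrapping IPv6 addresses in brackets ('[fc00::]:80'); an LB whose
    # external_ids carry enabled='False' renders an empty map, as the
    # three _frame_lb_vips tests above show.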
    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_check_lb_protocol_no_lb(self, fol):
        fol.return_value = None
        ret = self.helper.check_lb_protocol(self.listener_id, 'TCP')
        self.assertFalse(ret)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_members')
    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def _test_hm_create(self, protocol, members, folbpi, uhm, net_cli):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.protocol = [protocol]
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        uhm.return_value = True
        net_cli.return_value.show_subnet.return_value = {
            'subnet': fake_subnet}
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ONLINE)
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.ONLINE)
        if members:
            self.assertEqual(status['members'][0]['provisioning_status'],
                             constants.ACTIVE)
            self.assertEqual(status['members'][0]['operating_status'],
                             constants.ONLINE)
        vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] +
               ':' + str(self.listener['protocol_port']))
        options = {'interval': '6',
                   'timeout': '7',
                   'failure_count': '5',
                   'success_count': '3'}
        external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id}
        kwargs = {'vip': vip,
                  'options': options,
                  'external_ids': external_ids}
        self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
            'Load_Balancer_Health_Check', **kwargs)
        self.helper.ovn_nbdb_api.db_add.assert_called_once_with(
            'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY)

    def test_hm_create_tcp(self):
        self._test_hm_create('tcp', False)

    def test_hm_create_udp(self):
        self._test_hm_create('udp', False)

    def test_hm_create_tcp_pool_members(self):
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.external_ids[pool_key] = self.member_line
        self._test_hm_create('tcp', True)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_no_vip_port(self, folbpi):
        pool_key = 'pool_%s' % self.pool_id
        listener_key = 'listener_%s' % self.listener_id
        self.ovn_hm_lb.external_ids.pop(listener_key)
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ONLINE)
        vip = []
        options = {'interval': '6',
                   'timeout': '7',
                   'failure_count': '5',
                   'success_count': '3'}
        self.ovn_hm.external_ids.pop(ovn_const.LB_EXT_IDS_HM_KEY)
        external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id}
        kwargs = {'vip': vip,
                  'options': options,
                  'external_ids': external_ids}
        self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
            'Load_Balancer_Health_Check', **kwargs)
        self.helper.ovn_nbdb_api.db_add.assert_called_once_with(
            'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY)
    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_offline(self, folbpi):
        pool_key = 'pool_%s' % self.pool_id
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        self.health_monitor['admin_state_up'] = False
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.OFFLINE)
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.ONLINE)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_lb_not_found(self, folbpi):
        folbpi.return_value = (None, None)
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.NO_MONITOR)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_pool_not_found(self, folbpi):
        folbpi.return_value = ('pool_closed', self.ovn_hm_lb)
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.NO_MONITOR)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.OFFLINE)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_vip_not_found(self, folbpi):
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_KEY)
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_lsp_not_found(self, folbpi, net_cli):
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.external_ids[pool_key] = self.member_line
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        net_cli.return_value.show_subnet.side_effect = [n_exc.NotFound]
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)

    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_hm_port_not_found(self, folbpi, net_cli):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        fake_port = fakes.FakePort.create_one_port(
            attrs={'allowed_address_pairs': ''})
        member = {'id': uuidutils.generate_uuid(),
                  'address': fake_port['fixed_ips'][0]['ip_address'],
                  'protocol_port': '9999',
                  'subnet_id': fake_subnet['id'],
                  'pool_id': self.pool_id,
                  'admin_state_up': True,
                  'old_admin_state_up': True}
        member_line = (
            'member_%s_%s:%s_%s' %
            (member['id'], member['address'],
             member['protocol_port'], member['subnet_id']))
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.external_ids[pool_key] = member_line
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        net_cli.return_value.show_subnet.return_value = {
            'subnet': fake_subnet}
        net_cli.return_value.list_ports.return_value = {'ports': []}
        fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port)
        fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {},
                'ports': [fake_lsp]})
        self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)
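    # NOTE: hm_create translates the Octavia health monitor spec into a
    # Load_Balancer_Health_Check row whose options column stores
    # 'interval', 'timeout', 'failure_count' and 'success_count' as
    # strings, and whose external_ids record the Octavia healthmonitor
    # id under ovn_const.LB_EXT_IDS_HM_KEY (see _test_hm_create above).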
    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_hm_source_ip_not_found(self, folbpi, net_cli):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        fake_port = fakes.FakePort.create_one_port(
            attrs={'allowed_address_pairs': ''})
        member = {'id': uuidutils.generate_uuid(),
                  'address': fake_port['fixed_ips'][0]['ip_address'],
                  'protocol_port': '9999',
                  'subnet_id': fake_subnet['id'],
                  'pool_id': self.pool_id,
                  'admin_state_up': True,
                  'old_admin_state_up': True}
        member_line = (
            'member_%s_%s:%s_%s' %
            (member['id'], member['address'],
             member['protocol_port'], member['subnet_id']))
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.external_ids[pool_key] = member_line
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        net_cli.return_value.show_subnet.return_value = {
            'subnet': fake_subnet}
        fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port)
        fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={
                'external_ids': {},
                'ports': [fake_lsp]})
        self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_create_db_exception(self, folbpi):
        pool_key = 'pool_%s' % self.pool_id
        folbpi.return_value = (pool_key, self.ovn_hm_lb)
        self.helper.ovn_nbdb_api.db_create.side_effect = [RuntimeError]
        status = self.helper.hm_create(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_hm_by_id')
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
    def test_hm_create_then_listener_create(self, get_ovn_lb, lookup_hm):
        get_ovn_lb.return_value = self.ovn_hm_lb
        lookup_hm.return_value = self.ovn_hm
        self.ovn_hm_lb.health_check = self.ovn_hm
        self.listener['admin_state_up'] = True
        status = self.helper.listener_create(self.listener)
        vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] +
               ':' + str(self.listener['protocol_port']))
        self.helper.ovn_nbdb_api.db_set.assert_called_with(
            'Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', vip))
        self.assertEqual(status['listeners'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['listeners'][0]['operating_status'],
                         constants.ONLINE)

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_hm_by_id')
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
    def test_hm_create_then_listener_create_no_hm(self, get_ovn_lb,
                                                  lookup_hm):
        get_ovn_lb.return_value = self.ovn_hm_lb
        lookup_hm.return_value = None
        self.ovn_hm_lb.health_check = self.ovn_hm
        self.listener['admin_state_up'] = True
        status = self.helper.listener_create(self.listener)
        self.assertEqual(status['listeners'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['listeners'][0]['operating_status'],
                         constants.ERROR)
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_hm_by_id')
    @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
    def test_hm_create_then_listener_create_no_vip(self, get_ovn_lb,
                                                   lookup_hm, refresh_vips):
        get_ovn_lb.return_value = self.ovn_hm_lb
        lookup_hm.return_value = self.ovn_hm
        self.ovn_hm_lb.health_check = self.ovn_hm
        self.ovn_hm_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_KEY)
        self.listener['admin_state_up'] = True
        status = self.helper.listener_create(self.listener)
        self.assertEqual(status['listeners'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['listeners'][0]['operating_status'],
                         constants.ERROR)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_from_hm_id')
    def test_hm_update(self, folbfhi):
        folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb)
        status = self.helper.hm_update(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ONLINE)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_from_hm_id')
    def test_hm_update_no_admin_state_up(self, folbfhi):
        folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb)
        self.ovn_hm_lb.pop('admin_state_up')
        status = self.helper.hm_update(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ONLINE)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_from_hm_id')
    def test_hm_update_offline(self, folbfhi):
        folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb)
        self.health_monitor['admin_state_up'] = False
        status = self.helper.hm_update(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.OFFLINE)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_from_hm_id')
    def test_hm_update_hm_not_found(self, folbfhi):
        folbfhi.return_value = (None, None)
        status = self.helper.hm_update(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)

    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_from_hm_id')
    @mock.patch.object(ovn_helper.OvnProviderHelper,
                       '_find_ovn_lb_by_pool_id')
    def test_hm_update_lb_not_found(self, folbpi, folbfhi):
        folbfhi.return_value = (self.ovn_hm, None)
        folbpi.return_value = (None, None)
        status = self.helper.hm_update(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.ERROR)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.ERROR)
    def test_hm_delete(self):
        self.helper.ovn_nbdb_api.db_list_rows.return_value.\
            execute.return_value = [self.ovn_hm]
        self.helper.ovn_nbdb_api.db_find_rows.return_value.\
            execute.return_value = [self.ovn_hm_lb]
        status = self.helper.hm_delete(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.DELETED)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.NO_MONITOR)
        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        expected_clear_calls = [
            mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
                      'ip_port_mappings')]
        expected_remove_calls = [
            mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check',
                      self.ovn_hm.uuid)]
        expected_destroy_calls = [
            mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
        self.helper.ovn_nbdb_api.db_clear.assert_has_calls(
            expected_clear_calls)
        self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
            expected_remove_calls)
        self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
            expected_destroy_calls)

    def test_hm_delete_row_not_found(self):
        self.helper.ovn_nbdb_api.db_list_rows.return_value.\
            execute.return_value = [self.ovn_hm]
        self.helper.ovn_nbdb_api.db_find_rows.side_effect = (
            [idlutils.RowNotFound])
        status = self.helper.hm_delete(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.DELETED)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.NO_MONITOR)
        self.helper.ovn_nbdb_api.db_clear.assert_not_called()

    def test_hm_delete_hm_not_found(self):
        self.helper.ovn_nbdb_api.db_list_rows.return_value.\
            execute.return_value = [self.ovn_hm]
        self.helper.ovn_nbdb_api.db_find_rows.return_value.\
            execute.return_value = [self.ovn_hm_lb]
        self.health_monitor['id'] = 'id_not_found'
        status = self.helper.hm_delete(self.health_monitor)
        self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
                         constants.DELETED)
        self.assertEqual(status['healthmonitors'][0]['operating_status'],
                         constants.NO_MONITOR)
        self.helper.ovn_nbdb_api.db_clear.assert_not_called()

    def test_hm_update_event_offline(self):
        self.helper.ovn_nbdb_api.db_find_rows.return_value.\
            execute.return_value = [self.ovn_hm_lb]
        self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
            self.helper)
        src_ip = '10.22.33.4'
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ip': self.member_address,
                   'logical_port': 'a-logical-port',
                   'src_ip': src_ip,
                   'port': self.member_port,
                   'protocol': self.ovn_hm_lb.protocol,
                   'status': ['offline']})
        self.hm_update_event.run('update', row, mock.ANY)
        expected = {
            'info': {'ovn_lb': self.ovn_hm_lb,
                     'ip': self.member_address,
                     'port': self.member_port,
                     'status': ['offline']},
            'type': 'hm_update_event'}
        self.mock_add_request.assert_called_once_with(expected)
        self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with(
            'Load_Balancer',
            (('ip_port_mappings', '=',
              {self.member_address: 'a-logical-port:' + src_ip}),
             ('protocol', '=', self.ovn_hm_lb.protocol)))

    def test_hm_update_event_lb_not_found(self):
        self.helper.ovn_nbdb_api.db_find_rows.return_value.\
            execute.return_value = []
        self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
            self.helper)
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ip': self.member_address,
                   'logical_port': 'a-logical-port',
                   'src_ip': '10.22.33.4',
                   'port': self.member_port,
                   'protocol': self.ovn_hm_lb.protocol,
                   'status': ['offline']})
        self.hm_update_event.run('update', row, mock.ANY)
        self.mock_add_request.assert_not_called()

    def test_hm_update_event_lb_row_not_found(self):
        self.helper.ovn_nbdb_api.db_find_rows.\
            side_effect = [idlutils.RowNotFound]
        self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
            self.helper)
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ip': self.member_address,
                   'logical_port': 'a-logical-port',
                   'src_ip': '10.22.33.4',
                   'port': self.member_port,
                   'protocol': self.ovn_hm_lb.protocol,
                   'status': ['offline']})
        self.hm_update_event.run('update', row, mock.ANY)
        self.mock_add_request.assert_not_called()
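    # NOTE: Service_Monitor updates are matched back to a Load_Balancer
    # through its ip_port_mappings column, which maps each member IP to
    # '<logical_port>:<source_ip>', combined with the LB protocol; the
    # matching row is then queued as a 'hm_update_event' request, as
    # test_hm_update_event_offline above asserts.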
    def test_hm_update_event_lb_protocol_not_found(self):
        self.helper.ovn_nbdb_api.db_find_rows.\
            side_effect = [self.ovn_hm_lb, idlutils.RowNotFound]
        self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
            self.helper)
        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ip': self.member_address,
                   'logical_port': 'a-logical-port',
                   'src_ip': '10.22.33.4',
                   'port': self.member_port,
                   'protocol': 'unknown',
                   'status': ['offline']})
        self.hm_update_event.run('update', row, mock.ANY)
        self.mock_add_request.assert_not_called()

    def _test_hm_update_no_member(self, bad_ip, bad_port):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        fake_port = fakes.FakePort.create_one_port(
            attrs={'allowed_address_pairs': ''})
        ip = fake_port['fixed_ips'][0]['ip_address']
        member = {'id': uuidutils.generate_uuid(),
                  'address': ip,
                  'protocol_port': self.member_port,
                  'subnet_id': fake_subnet['id'],
                  'pool_id': self.pool_id,
                  'admin_state_up': True,
                  'old_admin_state_up': True}
        member_line = (
            'member_%s_%s:%s_%s' %
            (member['id'], member['address'],
             member['protocol_port'], member['subnet_id']))
        pool_key = 'pool_%s' % self.pool_id
        self.ovn_hm_lb.external_ids[pool_key] = member_line

        if bad_ip:
            ip = 'bad-ip'
        port = self.member_port
        if bad_port:
            port = 'bad-port'
        info = {
            'ovn_lb': self.ovn_hm_lb,
            'ip': ip,
            'logical_port': 'a-logical-port',
            'src_ip': '10.22.33.4',
            'port': port,
            'protocol': self.ovn_hm_lb.protocol,
            'status': ['offline']}
        status = self.helper.hm_update_event(info)
        self.assertIsNone(status)

    def test_hm_update_event_member_ip_not_found(self):
        self._test_hm_update_no_member(True, False)

    def test_hm_update_event_member_port_not_found(self):
        self._test_hm_update_no_member(False, True)

    def _test_hm_update_status(self, ip, port, member_status,
                               lb_status=constants.ONLINE,
                               pool_status=constants.ONLINE):
        fake_lb = fakes.FakeLB(
            uuid=uuidutils.generate_uuid(),
            admin_state_up=True,
            name='fake_lb',
            ext_ids={})
        fake_pool = fakes.FakePool(
            uuid=uuidutils.generate_uuid(),
            admin_state_up=True,
            name='fake_pool')
        info = {
            'ovn_lb': self.ovn_hm_lb,
            'ip': ip,
            'logical_port': 'a-logical-port',
            'src_ip': '10.22.33.4',
            'port': port,
            'protocol': self.ovn_hm_lb.protocol,
            'status': [member_status]}
        fake_lb.operating_status = lb_status
        fake_pool.operating_status = pool_status
        self.octavia_driver_lib.get_pool.return_value = fake_pool
        self.octavia_driver_lib.get_loadbalancer.return_value = fake_lb
        status = self.helper.hm_update_event(info)
        return status

    def _add_member(self, subnet, port):
        fake_port = fakes.FakePort.create_one_port(
            attrs={'allowed_address_pairs': ''})
        ip = fake_port['fixed_ips'][0]['ip_address']
        member = {'id': uuidutils.generate_uuid(),
                  'address': ip,
                  'protocol_port': port,
                  'subnet_id': subnet['id'],
                  'pool_id': self.pool_id,
                  'admin_state_up': True,
                  'old_admin_state_up': True}
        member_line = (
            'member_%s_%s:%s_%s' %
            (member['id'], member['address'],
             member['protocol_port'], member['subnet_id']))
        pool_key = 'pool_%s' % self.pool_id
        existing_members = self.ovn_hm_lb.external_ids[pool_key]
        if existing_members:
            existing_members = ','.join([existing_members, member_line])
            self.ovn_hm_lb.external_ids[pool_key] = existing_members
        else:
            self.ovn_hm_lb.external_ids[pool_key] = member_line
        return member
    def test_hm_update_status_offline(self):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        member = self._add_member(fake_subnet, 8080)
        status = self._test_hm_update_status(member['address'], '8080',
                                             'offline')
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.ERROR)
        self.assertEqual(status['members'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['members'][0]['operating_status'],
                         constants.ERROR)
        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['loadbalancers'][0]['operating_status'],
                         constants.ERROR)

    def test_hm_update_status_offline_lb_pool_offline(self):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        member = self._add_member(fake_subnet, 8080)
        status = self._test_hm_update_status(member['address'], '8080',
                                             'offline',
                                             lb_status=constants.OFFLINE,
                                             pool_status=constants.OFFLINE)
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.OFFLINE)
        self.assertEqual(status['members'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['members'][0]['operating_status'],
                         constants.ERROR)
        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['loadbalancers'][0]['operating_status'],
                         constants.OFFLINE)

    def test_hm_update_status_online(self):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        member = self._add_member(fake_subnet, 8080)
        status = self._test_hm_update_status(member['address'], '8080',
                                             'online')
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.ONLINE)
        self.assertEqual(status['members'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['members'][0]['operating_status'],
                         constants.ONLINE)
        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['loadbalancers'][0]['operating_status'],
                         constants.ONLINE)

    def test_hm_update_status_online_lb_pool_offline(self):
        fake_subnet = fakes.FakeSubnet.create_one_subnet()
        member = self._add_member(fake_subnet, 8080)
        status = self._test_hm_update_status(member['address'], '8080',
                                             'online',
                                             lb_status=constants.OFFLINE,
                                             pool_status=constants.OFFLINE)
        self.assertEqual(status['pools'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['pools'][0]['operating_status'],
                         constants.ONLINE)
        self.assertEqual(status['members'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['members'][0]['operating_status'],
                         constants.ONLINE)
        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
                         constants.ACTIVE)
        self.assertEqual(status['loadbalancers'][0]['operating_status'],
                         constants.ONLINE)
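    # NOTE: with several members in the pool the helper aggregates
    # operating_status: one member down while another is still ONLINE
    # yields DEGRADED for both pool and load balancer, whereas all
    # members in ERROR collapses both to ERROR (and symmetrically back
    # to ONLINE), as the two-member tests below exercise.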
constants.ERROR) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) def test_hm_update_status_online_two_members(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member_1 = self._add_member(fake_subnet, 8080) ip_1 = member_1['address'] member_2 = self._add_member(fake_subnet, 8081) ip_2 = member_2['address'] # This is the Octavia API version fake_member = fakes.FakeMember( uuid=member_2['id'], admin_state_up=True, name='member_2', project_id=self.project_id, address=ip_2, protocol_port=8081) # Second member ERROR, operating_status should be DEGRADED # for Pool and Loadbalancer fake_member.operating_status = constants.ERROR self.octavia_driver_lib.get_member.return_value = fake_member status = self._test_hm_update_status(ip_1, '8081', 'online') self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['operating_status'], constants.DEGRADED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.DEGRADED) # Second member ONLINE, operating_status should be ONLINE # for Pool and Loadbalancer fake_member.operating_status = constants.ONLINE self.octavia_driver_lib.get_member.return_value = fake_member status = self._test_hm_update_status(ip_1, '8081', 'online') self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4525883 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/0000775000175000017500000000000000000000000023526 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/PKG-INFO0000664000175000017500000000404000000000000024621 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: ovn-octavia-provider Version: 2.0.0 Summary: OpenStack Octavia integration with OVN Home-page: https://docs.openstack.org/ovn-octavia-provider/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: =================================================================== ovn-octavia-provider - OVN Provider driver for Octavia LoadBalancer =================================================================== OVN provides virtual networking for Open vSwitch and is a component of the Open vSwitch project. This project provides integration between OpenStack Octavia and OVN. * Free software: Apache license * Source: https://opendev.org/openstack/ovn-octavia-provider * Bugs: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider * Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss * IRC: #openstack-neutron on OFTC. * Docs: https://docs.openstack.org/ovn-octavia-provider/latest Team and repository tags ------------------------ .. 
image:: https://governance.openstack.org/tc/badges/ovn-octavia-provider.svg :target: https://governance.openstack.org/tc/reference/tags/index.html * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/ovn-octavia-provider Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/SOURCES.txt0000664000175000017500000000765700000000000025431 0ustar00zuulzuul00000000000000.coveragerc .pylintrc .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt lower-constraints.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/local.conf.sample devstack/plugin.sh devstack/settings doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/pdf-index.rst doc/source/_static/.placeholder doc/source/admin/driver.rst doc/source/admin/index.rst doc/source/configuration/config.rst doc/source/configuration/index.rst doc/source/contributor/index.rst doc/source/contributor/loadbalancer.rst etc/octavia/.placeholder etc/octavia/conf.d/.placeholder etc/oslo-config-generator/ovn.conf ovn_octavia_provider/__init__.py ovn_octavia_provider/agent.py ovn_octavia_provider/driver.py ovn_octavia_provider/event.py ovn_octavia_provider/helper.py ovn_octavia_provider/i18n.py ovn_octavia_provider.egg-info/PKG-INFO ovn_octavia_provider.egg-info/SOURCES.txt ovn_octavia_provider.egg-info/dependency_links.txt ovn_octavia_provider.egg-info/entry_points.txt ovn_octavia_provider.egg-info/not-zip-safe ovn_octavia_provider.egg-info/pbr.json ovn_octavia_provider.egg-info/requires.txt ovn_octavia_provider.egg-info/top_level.txt ovn_octavia_provider/common/clients.py ovn_octavia_provider/common/config.py ovn_octavia_provider/common/constants.py ovn_octavia_provider/common/exceptions.py ovn_octavia_provider/common/utils.py ovn_octavia_provider/hacking/__init__.py ovn_octavia_provider/hacking/checks.py ovn_octavia_provider/ovsdb/impl_idl_ovn.py ovn_octavia_provider/ovsdb/ovsdb_monitor.py ovn_octavia_provider/tests/__init__.py ovn_octavia_provider/tests/functional/__init__.py ovn_octavia_provider/tests/functional/base.py ovn_octavia_provider/tests/functional/requirements.txt ovn_octavia_provider/tests/functional/test_agent.py ovn_octavia_provider/tests/functional/test_driver.py ovn_octavia_provider/tests/functional/test_integration.py ovn_octavia_provider/tests/unit/__init__.py ovn_octavia_provider/tests/unit/base.py ovn_octavia_provider/tests/unit/fakes.py ovn_octavia_provider/tests/unit/test_agent.py ovn_octavia_provider/tests/unit/test_driver.py ovn_octavia_provider/tests/unit/test_hacking.py ovn_octavia_provider/tests/unit/test_helper.py ovn_octavia_provider/tests/unit/common/__init__.py ovn_octavia_provider/tests/unit/common/test_clients.py ovn_octavia_provider/tests/unit/hacking/__init__.py ovn_octavia_provider/tests/unit/hacking/test_checks.py ovn_octavia_provider/tests/unit/ovsdb/__init__.py 
ovn_octavia_provider/tests/unit/ovsdb/test_impl_idl_ovn.py ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema ovn_octavia_provider/tests/unit/schemas/ovn-sb.ovsschema playbooks/configure_functional_job.yaml playbooks/post_functional_job.yaml playbooks/run_functional_job.yaml releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml releasenotes/source/README.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder roles/configure_functional_tests/README.rst roles/configure_functional_tests/defaults/main.yaml roles/configure_functional_tests/tasks/main.yaml roles/fetch_journal_log/README.rst roles/fetch_journal_log/defaults/main.yaml roles/fetch_journal_log/tasks/main.yaml roles/setup_logdir/README.rst roles/setup_logdir/defaults/main.yaml roles/setup_logdir/tasks/main.yaml tools/check_unit_test_structure.sh tools/coding-checks.sh tools/generate_config_file_samples.sh tools/pip_install_src_modules.sh zuul.d/base.yaml zuul.d/project.yaml././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/dependency_links.txt0000664000175000017500000000000100000000000027574 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/entry_points.txt0000664000175000017500000000040000000000000027016 0ustar00zuulzuul00000000000000[octavia.api.drivers] ovn = ovn_octavia_provider.driver:OvnProviderDriver [octavia.driver_agent.provider_agents] ovn = ovn_octavia_provider.agent:OvnProviderAgent [oslo.config.opts] octavia.api.drivers.ovn = ovn_octavia_provider.common.config:list_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/not-zip-safe0000664000175000017500000000000100000000000025754 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/pbr.json0000664000175000017500000000005600000000000025205 0ustar00zuulzuul00000000000000{"git_version": "ec5b1d5", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/requires.txt0000664000175000017500000000043200000000000026125 0ustar00zuulzuul00000000000000SQLAlchemy>=1.4.23 keystoneauth1>=3.14.0 netaddr>=0.7.18 neutron-lib>=2.16.0 octavia-lib>=2.2.0 oslo.config>=8.0.0 oslo.log>=4.3.0 oslo.messaging>=12.4.0 oslo.serialization>=2.28.1 oslo.utils>=4.5.0 ovs>=2.10.0 ovsdbapp>=1.7.0 pbr>=4.0.0 python-neutronclient>=6.7.0 tenacity>=6.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641395.0 
ovn-octavia-provider-2.0.0/ovn_octavia_provider.egg-info/top_level.txt0000664000175000017500000000002500000000000026255 0ustar00zuulzuul00000000000000ovn_octavia_provider ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/playbooks/0000775000175000017500000000000000000000000017615 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/playbooks/configure_functional_job.yaml0000664000175000017500000000011200000000000025530 0ustar00zuulzuul00000000000000- hosts: all roles: - setup_logdir - configure_functional_tests ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/playbooks/post_functional_job.yaml0000664000175000017500000000014000000000000024537 0ustar00zuulzuul00000000000000- hosts: all roles: - fetch_journal_log - fetch-tox-output - fetch-subunit-output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/playbooks/run_functional_job.yaml0000664000175000017500000000023000000000000024356 0ustar00zuulzuul00000000000000- hosts: all roles: - role: bindep bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - ensure-tox - tox ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4405885 ovn-octavia-provider-2.0.0/releasenotes/0000775000175000017500000000000000000000000020303 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/releasenotes/notes/0000775000175000017500000000000000000000000021433 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml0000664000175000017500000000074700000000000027176 0ustar00zuulzuul00000000000000--- features: - | The OVN Octavia provider driver now supports health monitoring. TCP and UDP Connect health monitors are now supported by the provider driver when the underlying OVN version supports them. The health monitor uses the OVN distributed DHCP port as the source IP for messages by default; if one doesn't exist, a port will be created on each given subnet. The list of member ports to monitor is updated whenever one is created or deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml0000664000175000017500000000012600000000000030173 0ustar00zuulzuul00000000000000--- features: - | Add support for the SCTP protocol in the OVN provider driver. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=ovn-octavia-provider-2.0.0/releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml 22 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c440000664000175000017500000000036300000000000033570 0ustar00zuulzuul00000000000000--- prelude: > OVN Octavia provider driver has been created from the networking-ovn repository.
upgrade: - | OVN Octavia Provider driver registers under the same entry point. No action is required on the operator side. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=ovn-octavia-provider-2.0.0/releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml 22 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e180000664000175000017500000000023100000000000033570 0ustar00zuulzuul00000000000000--- fixes: - | OVN Octavia provider driver now supports both TCP and UDP pool/listener protocols configured in the same Octavia Load Balancer. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=ovn-octavia-provider-2.0.0/releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml 22 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f20000664000175000017500000000040100000000000033302 0ustar00zuulzuul00000000000000--- fixes: - | Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if it is not given the provider driver will now attempt to look it up via the pool ID, which is a required argument. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/releasenotes/source/0000775000175000017500000000000000000000000021603 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/README.rst0000664000175000017500000000270000000000000023271 0ustar00zuulzuul00000000000000======================================== OVN Octavia Provider Release Notes Howto ======================================== Release notes are used to document new features in OpenStack projects. Background on the process, tooling, and methodology is documented in a `mailing list post by Doug Hellmann `_. Writing release notes --------------------- For information on how to create release notes, please consult the `reno documentation `__. Please keep the following in mind when you write release notes. * **Avoid using the "prelude" section** for individual release notes. The "prelude" section is for general comments about the release. * **Use one entry per section** (like "feature" or "upgrade"). All entries that belong to the same release will be merged and rendered, so there is little value in using multiple entries for a single topic. Maintaining release notes ------------------------- .. warning:: Avoid modifying an existing release note file even if it is related to your change. If you modify a release note file of a past release, the whole content will be shown in the latest release. The only allowed case is updating a release note within the same release. If you need to update a release note of a past release, edit the corresponding release note file in the stable branch directly.
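For illustration only, a new note following the guidance above would typically be started with ``reno new <slug>`` and then edited so that each section carries one entry; a minimal sketch (the ``add-foo-support`` topic is hypothetical, not a note shipped in this repository) could look like::

    ---
    features:
      - |
        Add support for foo in the OVN provider driver.
    upgrade:
      - |
        No operator action is required when upgrading with foo enabled.

This matches the style of the notes under releasenotes/notes/, where a single entry covers the whole topic for its section.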
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000023231 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000025502 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000023740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000026211 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/conf.py0000664000175000017500000002067400000000000023113 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # OVN Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/ovn-octavia-provider' openstackdocs_bug_project = 'neutron' openstackdocs_bug_tag = 'ovn-octavia-provider' openstackdocs_auto_name = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2020, Neutron Developers' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'OVNOctaviaProviderReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'OVNOctaviaProviderReleaseNotes.tex', u'OVN Octavia Provider Release Notes Documentation', u'Neutron Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ovnoctaviaproviderreleasenotes', u'OVN Octavia Provider Release Notes Documentation', [u'Neutron Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'OVNOctaviaProviderReleaseNotes', u'OVN Octavia Provider Release Notes Documentation', u'Neutron Developers', 'OVNOctaviaProviderReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/index.rst0000664000175000017500000000035700000000000023451 0ustar00zuulzuul00000000000000=================================== OVN Octavia Provider Release Notes =================================== .. toctree:: :maxdepth: 1 unreleased xena wallaby victoria ussuri .. toctree:: :maxdepth: 1 README.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015600000000000024466 0ustar00zuulzuul00000000000000============================= Current Series Release Notes ============================= .. 
release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000023661 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/victoria.rst0000664000175000017500000000021200000000000024150 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: stable/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000020600000000000023766 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: stable/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/releasenotes/source/xena.rst0000664000175000017500000000017200000000000023270 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: stable/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/requirements.txt0000664000175000017500000000124200000000000021075 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. keystoneauth1>=3.14.0 # Apache-2.0 netaddr>=0.7.18 # BSD neutron-lib>=2.16.0 # Apache-2.0 oslo.config>=8.0.0 # Apache-2.0 oslo.log>=4.3.0 # Apache-2.0 oslo.messaging>=12.4.0 # Apache-2.0 oslo.serialization>=2.28.1 # Apache-2.0 oslo.utils>=4.5.0 # Apache-2.0 ovs>=2.10.0 # Apache-2.0 ovsdbapp>=1.7.0 # Apache-2.0 pbr>=4.0.0 # Apache-2.0 SQLAlchemy>=1.4.23 # MIT tenacity>=6.0.0 # Apache-2.0 octavia-lib>=2.2.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4405885 ovn-octavia-provider-2.0.0/roles/0000775000175000017500000000000000000000000016736 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/roles/configure_functional_tests/0000775000175000017500000000000000000000000024363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/configure_functional_tests/README.rst0000664000175000017500000000076300000000000026060 0ustar00zuulzuul00000000000000Configure the host to run Neutron functional/fullstack tests on it **Role Variables** .. zuul:rolevar:: tests_venv :default: {{ tox_envlist }} .. zuul:rolevar:: project_name :default: ovn-octavia-provider .. zuul:rolevar:: base_dir :default: {{ ansible_user_dir }}/src/opendev.org .. zuul:rolevar:: gate_dest_dir :default: {{ base_dir }}/openstack ..
zuul:rolevar:: devstack_dir :default: {{ base_dir }}/openstack/devstack .. zuul:rolevar:: neutron_dir :default: {{ gate_dest_dir }}/neutron ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4605885 ovn-octavia-provider-2.0.0/roles/configure_functional_tests/defaults/0000775000175000017500000000000000000000000026172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/configure_functional_tests/defaults/main.yaml0000664000175000017500000000050400000000000030001 0ustar00zuulzuul00000000000000tests_venv: "{{ tox_envlist }}" project_name: "ovn-octavia-provider" base_dir: "{{ ansible_user_dir }}/src/opendev.org" gate_dest_dir: "{{ base_dir }}/openstack" devstack_dir: "{{ base_dir }}/openstack/devstack" neutron_dir: "{{ gate_dest_dir }}/neutron" ovn_octavia_provider_dir: "{{ gate_dest_dir }}/ovn-octavia-provider" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/configure_functional_tests/tasks/0000775000175000017500000000000000000000000025510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/configure_functional_tests/tasks/main.yaml0000664000175000017500000000160600000000000027323 0ustar00zuulzuul00000000000000- shell: cmd: | set -e set -x GATE_STACK_USER={{ ansible_user }} IS_GATE=True BASE_DIR={{ base_dir }} GATE_DEST={{ gate_dest_dir }} PROJECT_NAME={{ project_name }} NEUTRON_DIR={{ neutron_dir }} DEVSTACK_PATH={{ devstack_dir }} TOP_DIR={{ devstack_dir }} VENV={{ tests_venv }} STACK_USER=stack OVS_BRANCH={{ OVS_BRANCH }} OVN_BRANCH={{ OVN_BRANCH }} Q_BUILD_OVS_FROM_GIT={{ Q_BUILD_OVS_FROM_GIT }} # This is the DB user used in e.g. the pgsql db DATABASE_USER=openstack_citest source $DEVSTACK_PATH/functions source $DEVSTACK_PATH/lib/neutron_plugins/ovs_source source $DEVSTACK_PATH/lib/neutron_plugins/ovn_agent source $NEUTRON_DIR/tools/configure_for_func_testing.sh configure_host_for_func_testing executable: /bin/bash ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/fetch_journal_log/0000775000175000017500000000000000000000000022422 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/fetch_journal_log/README.rst0000664000175000017500000000063500000000000024115 0ustar00zuulzuul00000000000000Collect the journal log from a test run. By default, this dumps the journal log into a log file and stores it at the path given by "journal_log_file_name". **Role Variables** .. zuul:rolevar:: journal_log_path :default: {{ ansible_user_dir }}/logs Path where the journal log file will be stored on the job's node. .. zuul:rolevar:: journal_log_file_name :default: {{ journal_log_path }}/journal.log Name of the journal log file.
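For illustration, a minimal playbook applying this role might look like the following sketch; the variable values simply restate the role defaults above::

    - hosts: all
      roles:
        - fetch_journal_log
      vars:
        journal_log_path: "{{ ansible_user_dir }}/logs"
        journal_log_file_name: "{{ journal_log_path }}/journal.log"

This mirrors how playbooks/post_functional_job.yaml applies the role while relying on the defaults.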
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/fetch_journal_log/defaults/0000775000175000017500000000000000000000000024231 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/fetch_journal_log/defaults/main.yaml0000664000175000017500000000015400000000000026041 0ustar00zuulzuul00000000000000journal_log_path: "{{ ansible_user_dir }}/logs" journal_log_file_name: "{{ journal_log_path }}/journal.log" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/fetch_journal_log/tasks/0000775000175000017500000000000000000000000023547 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/fetch_journal_log/tasks/main.yaml0000664000175000017500000000102500000000000025355 0ustar00zuulzuul00000000000000- name: Ensure {{ journal_log_path }} exists become: yes file: path: "{{ journal_log_path }}" state: directory owner: "{{ ansible_user }}" group: "{{ ansible_user }}" mode: 0775 - name: Store journal logs in {{ journal_log_file_name }} become: yes shell: cmd: | /bin/journalctl -a > {{ journal_log_file_name }} - name: Set journal.log file permissions become: yes file: path: '{{ journal_log_file_name }}' owner: '{{ ansible_user }}' group: '{{ ansible_user }}' mode: 0644 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/setup_logdir/0000775000175000017500000000000000000000000021436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/setup_logdir/README.rst0000664000175000017500000000027000000000000023124 0ustar00zuulzuul00000000000000Configure the logs dir to be accessible to the ``stack`` user. **Role Variables** .. zuul:rolevar:: logdir :default: /opt/stack/logs Name of the directory where logs will be stored.
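For illustration, a pre-run playbook could apply this role with an explicit log directory; the value shown is just the default spelled out::

    - hosts: all
      roles:
        - role: setup_logdir
          vars:
            logdir: /opt/stack/logs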
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/setup_logdir/defaults/0000775000175000017500000000000000000000000023245 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/setup_logdir/defaults/main.yaml0000664000175000017500000000003000000000000025046 0ustar00zuulzuul00000000000000logdir: /opt/stack/logs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/roles/setup_logdir/tasks/0000775000175000017500000000000000000000000022563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/roles/setup_logdir/tasks/main.yaml0000664000175000017500000000024100000000000024370 0ustar00zuulzuul00000000000000- name: Ensure logdir exists become: yes file: path: "{{ logdir }}" state: directory owner: stack group: "{{ ansible_user }}" mode: 0775 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/setup.cfg0000664000175000017500000000210300000000000017427 0ustar00zuulzuul00000000000000[metadata] name = ovn-octavia-provider summary = OpenStack Octavia integration with OVN description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/ovn-octavia-provider/latest/ python_requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 [files] packages = ovn_octavia_provider [global] setup_hooks = pbr.hooks.setup_hook [entry_points] octavia.api.drivers = ovn = ovn_octavia_provider.driver:OvnProviderDriver octavia.driver_agent.provider_agents = ovn = ovn_octavia_provider.agent:OvnProviderAgent oslo.config.opts = octavia.api.drivers.ovn = ovn_octavia_provider.common.config:list_opts [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/setup.py0000664000175000017500000000200600000000000017322 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. 
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/test-requirements.txt0000664000175000017500000000122200000000000022050 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking>=3.0.1,<3.1.0 # Apache-2.0 bandit!=1.6.0,>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 flake8-import-order==0.12 # LGPLv3 python-subunit>=1.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 pylint>=2.6.0 # GPLv2 testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD WebTest>=2.0.27 # MIT testtools>=2.2.0 # MIT # NOTE: Precisely we need wallaby neutron neutron>=18.0.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/tools/0000775000175000017500000000000000000000000016752 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/tools/check_unit_test_structure.sh0000775000175000017500000000331100000000000024602 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # This script identifies the unit test modules that do not correspond # directly with a module in the code tree. See TESTING.rst for the # intended structure. repo_path=$(cd "$(dirname "$0")/.." && pwd) base_test_path=ovn_octavia_provider/tests/unit test_path=$repo_path/$base_test_path test_files=$(find ${test_path} -iname 'test_*.py') ignore_regexes=( # Exceptional cases that should be skipped can be added here # EXAMPLE: "^objects/test_objects.py$" ) error_count=0 ignore_count=0 total_count=0 for test_file in ${test_files[@]}; do relative_path=${test_file#$test_path/} expected_path=$(dirname $repo_path/ovn_octavia_provider/$relative_path) test_filename=$(basename "$test_file") expected_filename=${test_filename#test_} # Module filename (e.g. foo/bar.py -> foo/test_bar.py) filename=$expected_path/$expected_filename # Package dir (e.g. foo/ -> test_foo.py) package_dir=${filename%.py} if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then for ignore_regex in ${ignore_regexes[@]}; do if [[ "$relative_path" =~ $ignore_regex ]]; then ignore_count=$((ignore_count + 1)) continue 2 fi done echo "Unexpected test file: $base_test_path/$relative_path" error_count=$((error_count + 1)) fi total_count=$((total_count + 1)) done if [ "$ignore_count" -ne 0 ]; then echo "$ignore_count unmatched test modules were ignored" fi if [ "$error_count" -eq 0 ]; then echo 'Success! All test modules match targets in the code tree.' exit 0 else echo "Failure! $error_count of $total_count test modules do not match targets in the code tree." exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/tools/coding-checks.sh0000775000175000017500000000306300000000000022014 0ustar00zuulzuul00000000000000#!/bin/sh # This script is copied from neutron and adapted for networking-ovn. set -eu usage () { echo "Usage: $0 [OPTION]..." 
echo "Run ovn_octavia_provider's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire ovn_octavia_provider module or just files changed in basecommit (e.g. HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } join_args() { if [ -z "$scriptargs" ]; then scriptargs="$opt" else scriptargs="$scriptargs $opt" fi } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) join_args;; esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="ovn_octavia_provider" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/tools/generate_config_file_samples.sh0000775000175000017500000000143000000000000025151 0ustar00zuulzuul00000000000000#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/tools/pip_install_src_modules.sh0000775000175000017500000000122200000000000024223 0ustar00zuulzuul00000000000000#!/bin/bash # For networking-ovn unit tests, you can define git repos containing modules # that you want to use to override the requirements-based packages. # # Why, you ask? Because you made changes to neutron-lib, and you want # run the unit tests together. E.g.: # # env TOX_ENV_SRC_MODULES="$HOME/src/neutron-lib" tox -e py37 toxinidir="$1" if [ -z "$TOX_ENV_SRC_MODULES" ]; then exit 0 fi for repo in $TOX_ENV_SRC_MODULES; do d="${toxinidir}/${repo}" if [ ! 
-d "$d" ]; then echo "tox_env_src: error: no directory found at $d" continue fi echo "tox_env_src: pip installing from $d" pip install -e "$d" done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/tox.ini0000664000175000017500000001240400000000000017126 0ustar00zuulzuul00000000000000[tox] minversion = 3.18.0 envlist = docs,py38,pep8 skipsdist = True ignore_basepython_conflict = True [testenv] basepython = python3 usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt allowlist_externals = bash sh passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY TOX_ENV_SRC_MODULES commands = {toxinidir}/tools/pip_install_src_modules.sh "{toxinidir}" stestr run {posargs} [testenv:pep8] envdir = {toxworkdir}/shared commands = flake8 {toxinidir}/tools/check_unit_test_structure.sh {toxinidir}/tools/coding-checks.sh --pylint '{posargs}' {[testenv:bandit]commands} {[testenv:genconfig]commands} [testenv:venv] commands = {posargs} [testenv:functional] setenv = {[testenv]setenv} OS_TEST_PATH=./ovn_octavia_provider/tests/functional OS_TEST_TIMEOUT=240 deps = {[testenv]deps} -r{toxinidir}/ovn_octavia_provider/tests/functional/requirements.txt [testenv:dsvm] # Fake job to define environment variables shared between dsvm jobs setenv = OS_TEST_TIMEOUT=240 OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} commands = false [testenv:dsvm-functional] setenv = {[testenv:functional]setenv} {[testenv:dsvm]setenv} deps = {[testenv:functional]deps} commands = stestr run {posargs} [testenv:cover] envdir = {toxworkdir}/shared setenv = {[testenv]setenv} PYTHON=coverage run --source ovn_octavia_provider --parallel-mode commands = stestr run --no-subunit-trace {posargs} coverage combine coverage report --fail-under=90 --skip-covered coverage html -d cover coverage xml -o cover/coverage.xml [testenv:docs] envdir = {toxworkdir}/docs deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt commands = sphinx-build -W -b html doc/source doc/build/html [testenv:pdf-docs] envdir = {toxworkdir}/docs deps = {[testenv:docs]deps} allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:debug] envdir = {toxworkdir}/shared commands = oslo_debug_helper -t ovn_octavia_provider/tests {posargs} [testenv:releasenotes] envdir = {toxworkdir}/docs deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [flake8] # W504 line break after binary operator ignore = W504 # H106: Don't put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H204: Use assert(Not)Equal to check for equality # H205: Use assert(Greater|Less)(Equal) for comparison # H904: Delay string interpolations at logging calls enable-extensions=H106,H203,H204,H205,H904 show-source = True exclude=./.*,dist,doc,*egg*,build,releasenotes import-order-style = pep8 [hacking] import_exceptions = 
ovn_octavia_provider.i18n [flake8:local-plugins] extension = N322 = checks:check_assert_called_once_with N328 = checks:check_asserttruefalse N330 = checks:check_assertempty N331 = checks:check_assertisinstance N332 = checks:check_assertequal_for_httpcode N343 = checks:check_no_imports_from_tests N344 = checks:check_python3_no_filter N347 = checks:check_no_import_mock N348 = checks:check_assertcountequal paths =./ovn_octavia_provider/hacking [testenv:genconfig] envdir = {toxworkdir}/shared commands = {toxinidir}/tools/generate_config_file_samples.sh # This environment can be used to quickly validate that all needed system # packages required to successfully execute test targets are installed [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. deps = bindep commands = bindep test [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt [testenv:requirements] deps = -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements allowlist_externals = sh commands = sh -c '{envdir}/src/openstack-requirements/playbooks/files/project-requirements-change.py --req {envdir}/src/openstack-requirements --local {toxinidir} master' [testenv:bandit] envdir = {toxworkdir}/shared deps = -r{toxinidir}/test-requirements.txt commands = bandit -r ovn_octavia_provider -x tests -n5 [testenv:dev] # run locally (not in the gate) using editable mode # https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs commands = pip install -q -e "git+https://git.openstack.org/openstack/neutron#egg=neutron" {[testenv]commands} [testenv:pep8-dev] deps = {[testenv]deps} commands = {[testenv:dev]commands} {[testenv:pep8]commands} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641395.4645884 ovn-octavia-provider-2.0.0/zuul.d/0000775000175000017500000000000000000000000017033 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/zuul.d/base.yaml0000664000175000017500000001442100000000000020633 0ustar00zuulzuul00000000000000- job: name: ovn-octavia-provider-functional-base parent: devstack-minimal description: Run OVN Octavia provider functional tests timeout: 7800 required-projects: - opendev.org/openstack/devstack - openstack/neutron - openstack/requirements roles: - zuul: openstack/devstack pre-run: playbooks/configure_functional_job.yaml run: playbooks/run_functional_job.yaml post-run: playbooks/post_functional_job.yaml irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ vars: devstack_services: # Ignore any default set by devstack. Emit a "disable_all_services". 
base: false devstack_localrc: INSTALL_TESTONLY_PACKAGES: true DATABASE_PASSWORD: stackdb tox_envlist: dsvm-functional tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/openstack/requirements/upper-constraints.txt' zuul_copy_output: # We need to copy directory with logs to have it in job artifacts also, # /opt/stack/logs is default logs directory defined in neutron's # tox.ini file '{{ devstack_base_dir }}/logs/dsvm-functional-logs': logs - job: name: ovn-octavia-provider-functional-release parent: ovn-octavia-provider-functional-base description: Run OVN Octavia provider functional tests vars: Q_BUILD_OVS_FROM_GIT: True INSTALL_OVN: True OVN_BRANCH: v20.06.0 OVS_BRANCH: v2.13.0 - job: name: ovn-octavia-provider-functional-master parent: ovn-octavia-provider-functional-base description: Run OVN Octavia provider functional tests - OVN master vars: Q_BUILD_OVS_FROM_GIT: True INSTALL_OVN: True OVN_BRANCH: main OVS_BRANCH: master - job: name: ovn-octavia-provider-tempest-base parent: devstack-tempest abstract: true timeout: 7800 required-projects: - openstack/neutron - openstack/octavia - openstack/octavia-lib - openstack/octavia-tempest-plugin - openstack/python-octaviaclient - openstack/ovn-octavia-provider irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^etc/.*$ - ^releasenotes/.*$ vars: devstack_localrc: Q_AGENT: ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve Q_ML2_TENANT_NETWORK_TYPE: geneve USE_PYTHON3: True TEMPEST_PLUGINS: '/opt/stack/octavia-tempest-plugin' OCTAVIA_NODE: api OCTAVIA_TEMPEST_PLUGIN_CUSTOMIZE_IMAGE: true DISABLE_AMP_IMAGE_BUILD: true OVN_L3_CREATE_PUBLIC_NETWORK: true Q_USE_PROVIDERNET_FOR_PUBLIC: true PHYSICAL_NETWORK: public ENABLE_CHASSIS_AS_GW: true OVN_DBS_LOG_LEVEL: dbg devstack_local_conf: post-config: $OCTAVIA_CONF: api_settings: enabled_provider_drivers: 'ovn:OVN provider driver' default_provider_driver: 'ovn' test-config: "$TEMPEST_CONFIG": load_balancer: provider: 'ovn' enable_security_groups: True enabled_provider_drivers: 'ovn:OVN provider driver' test_sctp_protocol: True loadbalancer-feature-enabled: health_monitor_enabled: True pool_algorithms_enabled: False l7_protocol_enabled: False l4_protocol: "TCP" session_persistence_enabled: False not_implemented_is_error: False devstack_services: c-bak: false ceilometer-acentral: false ceilometer-acompute: false ceilometer-alarm-evaluator: false ceilometer-alarm-notifier: false ceilometer-anotification: false ceilometer-api: false ceilometer-collector: false c-sch: false c-api: false c-vol: false cinder: false q-svc: true q-dns: true q-dhcp: false q-agt: false q-meta: false q-l3: false ovn-northd: true ovn-controller: true q-ovn-metadata-agent: true octavia: true o-api: true o-da: true o-hk: true o-cw: false o-hm: false swift: false s-account: false s-container: false s-object: false s-proxy: false tempest: true etcd: false devstack_plugins: neutron: https://opendev.org/openstack/neutron.git octavia: https://opendev.org/openstack/octavia.git octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin.git ovn-octavia-provider: https://opendev.org/openstack/ovn-octavia-provider tempest_test_regex: "^octavia_tempest_plugin.tests.(api|scenario).v2" tempest_exclude_regex: "\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_http_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_tcp_traffic)|\ 
(^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_udp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_sctp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_source_ip_port_tcp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_source_ip_port_udp_traffic)" zuul_copy_output: '{{ devstack_base_dir }}/data/ovs': 'logs' '{{ devstack_base_dir }}/data/ovn': 'logs' '{{ devstack_log_dir }}/ovsdb-server-nb.log': 'logs' '{{ devstack_log_dir }}/ovsdb-server-sb.log': 'logs' tempest_concurrency: 2 tox_envlist: all - job: name: ovn-octavia-provider-tempest-release parent: ovn-octavia-provider-tempest-base vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True OVN_BRANCH: v20.06.0 OVS_BRANCH: v2.13.0 - job: name: ovn-octavia-provider-tempest-master parent: ovn-octavia-provider-tempest-base vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True OVN_BRANCH: main OVS_BRANCH: master ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641357.0 ovn-octavia-provider-2.0.0/zuul.d/project.yaml0000664000175000017500000000146700000000000021375 0ustar00zuulzuul00000000000000- project: templates: - publish-openstack-docs-pti - release-notes-jobs-python3 - check-requirements - openstack-python3-yoga-jobs-neutron - openstack-lower-constraints-jobs-neutron check: jobs: - openstack-tox-cover: required-projects: - openstack/neutron - ovn-octavia-provider-functional-release - ovn-octavia-provider-functional-master - ovn-octavia-provider-tempest-release - ovn-octavia-provider-tempest-master: voting: false - kuryr-kubernetes-tempest-ovn-provider-ovn: voting: false gate: fail-fast: true jobs: - ovn-octavia-provider-functional-release - ovn-octavia-provider-functional-master - ovn-octavia-provider-tempest-release