ovn_octavia_provider-8.1.0.dev15/.coveragerc

[run]
branch = True
source = ovn_octavia_provider
omit = ovn_octavia_provider/tests/*

[report]
ignore_errors = True

ovn_octavia_provider-8.1.0.dev15/.pre-commit-config.yaml

---
default_language_version:
  # force all unspecified python hooks to run python3
  python: python3
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
      - id: mixed-line-ending
        args: ['--fix', 'lf']
        exclude: '.*\.(svg)$'
      - id: check-byte-order-marker
      - id: check-executables-have-shebangs
      - id: check-merge-conflict
      - id: debug-statements
      - id: check-yaml
  - repo: https://github.com/lucas-c/pre-commit-hooks
    rev: v1.5.4
    hooks:
      - id: remove-tabs
        exclude: '.*\.(svg)$'
  - repo: local
    hooks:
      - id: flake8
        name: flake8
        additional_dependencies:
          - hacking>=6.1.0,<6.2.0
        language: python
        entry: flake8
        files: '^.*\.py$'
        exclude: '^(doc|releasenotes|tools)/.*$'
# todo(slaweq): enable pylint check once all issues in the current code will
# be solved
#      - id: pylint
#        name: pylint
#        entry: pylint
#        files: ^ovn_octavia_provider/
#        language: system
#        types: [python]
#        args: ['--rcfile=.pylintrc', '--output-format=colorized']

ovn_octavia_provider-8.1.0.dev15/.pylintrc

# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add to the black list. It should be a base name, not a
# path. You may set this option multiple times.
ignore=.git,tests

[MESSAGES CONTROL]
# TODO: This list is copied from neutron, the options which do not need to be
# suppressed have been already removed, some of the remaining options will be
# removed by code adjustment.
disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise # "E" Error for important programming issues (likely bugs) no-member, # "W" Warnings for stylistic problems or minor programming issues abstract-method, arguments-differ, attribute-defined-outside-init, broad-except, dangerous-default-value, fixme, global-statement, protected-access, redefined-builtin, redefined-outer-name, signature-differs, unused-argument, unused-import, unused-variable, useless-super-delegation, # "C" Coding convention violations invalid-name, len-as-condition, missing-docstring, superfluous-parens, ungrouped-imports, wrong-import-order, # "R" Refactor recommendations duplicate-code, no-else-return, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-public-methods, too-many-return-statements, too-many-statements, inconsistent-return-statements, useless-object-inheritance, too-many-nested-blocks, too-many-boolean-expressions, not-callable, # new for python3 version of pylint chained-comparison, consider-using-dict-comprehension, consider-using-in, consider-using-set-comprehension, unnecessary-pass, useless-object-inheritance, arguments-renamed, too-many-positional-arguments [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. 
additional-builtins= [CLASSES] [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use oslo_serialization.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/.stestr.conf0000664000175100017510000000012215033037524020243 0ustar00mylesmyles[DEFAULT] test_path=${OS_TEST_PATH:-./ovn_octavia_provider/tests/unit} top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/AUTHORS0000664000175100017510000000254115033037524017051 0ustar00mylesmylesBrian Haley Brian Haley Chris Buggy Corey Bryant Dmitriy Rabotyagov Fernando Royo Flavio Fernandes Flavio Fernandes Ghanshyam Mann Gregory Thiemonge Hervé Beraud Ihar Hrachyshka Jake Yip Jakub Libosvar James Page Lucas Alvares Gomes Luis Tomas Bolivar Maciej JJózefczyk Maciej Jozefczyk Maciej Józefczyk Manpreet Kaur Michał Nasiadka Miguel Lavalle OpenStack Release Bot Pierre Riteau Rico Lin Rodolfo Alonso Hernandez Sami Yessou Sean McGinnis Slawek Kaplonski Takashi Kajinami Terry Wilson Vasyl Saienko ricolin shanyunfan33 yatinkarel ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/CONTRIBUTING.rst0000664000175100017510000000106515033037524020442 0ustar00mylesmylesIf you would like to contribute to the development of OpenStack, you must follow the steps in this page: https://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
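As a rough sketch, the Gerrit submission workflow for this repository looks
like the following (the topic branch name is only an example, and the
``git-review`` tool must be installed first):

.. code-block:: console

    $ git clone https://opendev.org/openstack/ovn-octavia-provider
    $ cd ovn-octavia-provider
    $ git checkout -b example-topic-branch
    # edit code, add tests, then commit the change locally
    $ git commit -a
    $ git review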
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ChangeLog0000664000175100017510000002615215033037524017557 0ustar00mylesmylesCHANGES ======= * Patching Octavia LB pool removal issue and adding updated unit tests * Replace the OVN Metadata agent with the OVN agent * [FT] Unlock the functional jobs * Add documentation for octavia-ovn-db-sync-util * Respect passed arguments for Neutron client connection * Prepare to handle ha\_chassis\_group for LRP * Fix tox coverage test result filename * Fix OVN DB sync when syncing an OVN LB from scratch * Update master for stable/2025.1 8.0.0 ----- * Add sync floating IP support * Validate ovn\_nb/sb\_connection in config parser * Drop unused environment * Add Health monitor sync logic * Add Member sync logic * Add Pool sync logic * Add Listener sync logic * Ignore reno artefacts (RELEASENOTES.rst and reno.cache) * [eventlet-removal] Remove "logger" mechanism from ML2/OVN CI jobs * Add LB sync logic * Update default envlist * Remove join on helper request daemon thread * Add Tobiko to gate jobs * Bump hacking * Add octavia\_client with openstacksdk * Adding Tobiko Test To CI * Add Octavia OVN DBs sync cmd * reno: Update master for unmaintained/2023.1 * Remove Python 3.8 support * Fix pep8 with pylint 3.3.0 * Update master for stable/2024.2 7.0.0 ----- * Fix member subnet id on a fully populated LB * Error log for missing certs with NB and SB DBs * Maintenance task: do not change IPv4 ip\_port\_mappings * [OVN][CI] Update OVS\_BRANCH to be compatible with ovn main * Return DELETED status when deleting a nonexistent member * Don't create an OVSDB connection per API request * tests: Remove obsolete TODO about a fd leak * Fix new pylint errors * Add pre-commit configuration * Fix small flake8 issues pointed out by the pre-commit script * Remove leftover OVN LB HM port upon deletion of a member * reno: Update master for unmaintained/zed * FIX OVN LB Health Monitor checks for IPv6 members * Adding isolation to functional tests * Update master for stable/2024.1 6.0.0 ----- * Fix check for a CR-LRP as a gateway port * Retry on case of sqlite3.InterfaceError on FT * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * tox: Drop envdir * [OVN][CI] Update OVS\_BRANCH to be compatible with ovn main * tests: call correct assert\_\* methods * reno: Update master for unmaintained/yoga * Fix pattern to exclude generated config file * Allow LB members to mix IPv4 and IPv6 for the multivip LB * Remove unnecessary ceilometer service overrides * Allow multiple VIPs per LB * Add maintenance task to update entities on component start * Change device\_owner used for OVN LB HM port * Check multiple address of a LRP plugged to LS * Update master for stable/2023.2 5.0.0.0rc1 ---------- * Cover the use case of a member non existing * Bump Neutron and neutron-lib versions * [CI] Bump OVS\_BRANCH in ovs/ovn source deploy jobs * Add FIP on LogicalSwitchPortUpdate event * Fix port for Load Balancer Health Check for FIP * Update pool upon HM deletion request * Add support for SOURCE\_IP session persistence * Ensure DVR is restablished on member on cascade deletion * Apply admin\_state\_up on a new member creation * Discard batch-update-members not valid request * Add retry on case of sqlite3.InterfaceError 
on FT * Fix update member action * Replace python-neutronclient with openstacksdk * Update doc about ovn-octavia HM type limitation * Pin OVS\_BRANCH to working commit * Use ovsdbapp commands to add/del backends to ip\_port\_mappings * Fix broken pep8 jobs due to bandit 1.7.5 updated version * Update master for stable/2023.1 * Add new FTs for health monitoring basic operations * Remove HM uuiid from LB external\_ids when the HM's pool is deleted 4.0.0.0rc1 ---------- * Reset member provisioning status to NO\_MONITOR when a HM is deleted * Ensure HM also apply to FIPs associated to LB VIPs * Avoid use of ovn metadata port IP for HM checks * Remove LB from LS belonging to provider networks * Fix jobs after tox4 upgrade * Uncouple HM status of member statuses * Fix listener provisioning\_status after HM created/deleted * Pin OVS\_BRANCH to master again * Make clear distinction between health check and health monitor * Ensure HM updates work as expected * Add support for HM on a fullypopulated load balancers * Increase code coverage * Ensure OVN-LB is properly configured upon LS removal from LR * Optimization for find\_ls\_for\_lr * Ensure LB are removed from LS not connected to the LB LR * Remove duplicated constant entry * [OVN] Pin OVS version for OVN master * Ensure lbs are properly configured for router gateway set/unset * Avoid LB ERROR status on delete when LS/LR are already deleted * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed 3.0.0 ----- * Capture port deleted event associated to HM ServiceMonitor * Update documentation about ovn-octavia limitations * Fix create\_vip\_port prototype based on octavia-lib * [OVN] Pin OVS version for OVN master * Fix healthMonitor events affecting to unrelated LB * Ensure members without subnet belong to VIP subnet or fail * Fix IPv6 member delete on batch operation * Drop lower-constraints.txt and its testing * Update python testing as per zed cycle teting runtime * Apply ServiceMonitorEvent to affected LBs * Fix way of calculate LB status after HM event * Fix Load balancer remains on PENDING\_CREATE * Fix request to OVN NB DB API * Delete vip port if create\_vip\_port raise exception * [OVN] Fix DuplicateOptionError on test scope * Use python3-devel in bindep for rpm distros * Increase code coverage * [OVN] Pin OVS version for OVN v21.06.0 * Avoid loadbalancer stuck in PENDING\_X if delete\_vip\_port fails * Remove incorrect character in f-string * Fix zuul templates for functional and tempest tests * Fix deletion of members without subnet\_id * Retry logical switch associations to load balancers * Add SB connection to in devstack * Add Python3 zed unit tests * Update master for stable/yoga * Add SB ssl connection to in devstack 2.0.0 ----- * Update python testing classifier * Allow to create ovn loadbalancer on dual-stack provider networks * Add support for fullypopulated load balancers * [FT] Enable OVS and OVN compilation from source * Set listeners back to ACTIVE upon pool/member action failures * remove unicode from code * Check gateway IP while looking for LR plugged to LS * Fix functional tests job * Support creating members without a subnet ID * Add Python3 yoga unit tests * Update master for stable/xena * Fix lower-constanints and pep8 jobs 1.1.1 ----- * Update docs based on support added recently 1.1.0 ----- * Add Health Monitor support * Fix race condition retrieving logical router rows * docs: Update Freenode to OFTC * Disable some high failure rate tempest tests * Fix new pylint issues * Add 
a Kuryr Kubernetes co-gating job * Fix functional jobs due to OVS file removal * Ensure that load balancer is added to logical switch * Change minversion of tox to 3.18.0 * Add log for request\_handler events * setup.cfg: Replace dashes with underscores * Fix python 3.9 unit test failures * Add Python3 xena unit tests * Update master for stable/wallaby * Improve enabled\_provider\_drivers default in devstack 1.0.0 ----- * Start running the tempest API tests * Switch to new rolevar for run-temepst role * Add SCTP support * Remove devstack-gate reference * Update sample local.conf to mention OVN\_BRANCH * Check if no members in pool better in batch update * Don't try to store protocol=None in OVSDB * Update to pylint 2.6.0+ * Collect OVN logs * Enable the IPv6 tempest tests * Correctly set member operating status * Change to build OVN from source * Delay string interpolations at logging calls * Remove work around change for test\_port\_forwarding * Fix gate failure * Return UnsupportedOptionError() on loadbalancer failover * Use get\_ovn\_ovsdb\_retry\_max\_interval() * Retry status updates to Octavia * Fix leaked file descriptors by cleaning up objects * Change devstack script to correctly configure driver * Include python36-devel in bindep * Add master and release tempest jobs * Fix and enable test\_port\_forwarding * Switch from unittest2 compat methods to Python 3.x methods * Add Python3 wallaby unit tests * Fix the check for allowed\_cidrs in listeners * Update master for stable/victoria * Mark test\_port\_forwarding unstable to fix gate 0.3.0 ----- * test\_integration.py: remove neutron dependency * Add integration tests with port forwarding * Set OPERATING\_STATUS to ONLINE when pool created with listener * Switch to TOX\_CONSTRAINTS\_FILE * Fix broken gate * Omit tests directory while couting the coverage report * Set the CONF.load\_balancer.enabled\_provider\_drivers tempest conf * Fix member\_batch\_update function to follow api v2 * Add installation of octavia-tempest-plugin to sample local.conf * Update ROUND\_ROBIN to SOURCE\_IP\_PORT in docs * Fix pep8 and functional jobs 0.2.0 ----- * drop mock from lower-constraints * Add a hacking check for importing mock library * Remove unnecessary libraries from lower-constraints * Remove python modules related to coding style checks * Switch to newer openstackdocstheme and reno versions * Do not send status update in case of IpAddressAlreadyAllocated * Fix the Backend class for ovsbapp index changes * Add support for OVN LB selection fields * Fix hacking min version to 3.0.1 * Re-home functional tests * Re-home ovsdb event classes * Re-home unit tests * Re-home OvnProviderHelper class * Fix E741 pep8 errors * Re-home get\_neutron\_client() into common/clients.py * Re-home constants to common/constants.py * Re-home OvnNbIdlForLb class * Use more octavia-lib constants * Re-home MockedLB to tests.unit.fakes * Re-home exceptions to common/exceptions.py * Add release note README file * Bump default tox env from py37 to py38 * Add py38 package metadata * Spawn long-running processes in the driver agent * Update requirements and constraints * Make ovn-octavia-provider-v2-dsvm-scenario voting * Add Python3 victoria unit tests * Update master for stable/ussuri * Update the devstack local.conf sample file * Improve test coverage * Remove the dependency on the "mock" package * Fix incorrect os-testr test requirement * Update hacking for Python3 0.1.0 ----- * Add missing requirements * Stop using Octavia network driver * Respect SSL devstack 
configuration * Remove backwards-compatibility check * Add unit tests for hacking/checks.py * Add tempest gate, devstack plugin and sample devstack config * Add configuration page to docs * Use queue library directly * Do not try to refresh vips on OVN LB that will be deleted * Enable cover job * Add admin and contributor documentation * Wrap IPv6 address with brackets * Fix Exception string arguments * Centralize traffic when LB and member has FIP * Don't fail if VIP already exist or has been deleted before * Ensure setup.cfg packages matches root directory * Don't send malformed status update to Octavia * Fix py2 vs py3 dict keys comparison in functional test * Cache OvnProviderHelper object in OvnProviderDriver * Don't fail in case subnet or Logical Switch not found * Add support for multiple L4 protocols withing same LB * Import OVN LB functional tests * Move OVN Octavia Provider driver code to this repository * Set basepython in tox.ini to python3 * Initialize repository * Added .gitreview ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/HACKING.rst0000664000175100017510000000163615033037524017603 0ustar00mylesmylesovn-octavia-provider Style Commandments =============================================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ Below you can find a list of checks specific to this repository. - [N322] Detect common errors with assert_called_once_with - [N328] Detect wrong usage with assertEqual - [N330] Use assertEqual(*empty*, observed) instead of assertEqual(observed, *empty*) - [N331] Detect wrong usage with assertTrue(isinstance()). - [N332] Use assertEqual(expected_http_code, observed_http_code) instead of assertEqual(observed_http_code, expected_http_code). - [N343] Production code must not import from ovn_octavia_provider.tests.* - [N344] Python 3: Do not use filter(lambda obj: test(obj), data). Replace it with [obj for obj in data if test(obj)]. - [N347] Test code must not import mock library - [N348] Detect usage of assertItemsEqual ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/LICENSE0000664000175100017510000002363715033037524017017 0ustar00mylesmyles Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/PKG-INFO0000644000175100017510000000501015033037526017070 0ustar00mylesmylesMetadata-Version: 2.2 Name: ovn-octavia-provider Version: 8.1.0.dev15 Summary: OpenStack Octavia integration with OVN Home-page: https://docs.openstack.org/ovn-octavia-provider/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.9 License-File: LICENSE Requires-Dist: keystoneauth1>=3.14.0 Requires-Dist: netaddr>=0.7.18 Requires-Dist: neutron-lib>=3.8.0 Requires-Dist: openstacksdk>=0.103.0 Requires-Dist: oslo.config>=8.0.0 Requires-Dist: oslo.log>=4.3.0 Requires-Dist: oslo.messaging>=12.4.0 Requires-Dist: oslo.serialization>=2.28.1 Requires-Dist: oslo.utils>=4.5.0 Requires-Dist: ovs>=2.10.0 Requires-Dist: ovsdbapp>=2.1.0 Requires-Dist: pbr>=4.0.0 Requires-Dist: SQLAlchemy>=1.4.23 Requires-Dist: tenacity>=6.0.0 Requires-Dist: octavia-lib>=2.2.0 Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: requires-dist Dynamic: requires-python Dynamic: summary =================================================================== ovn-octavia-provider - OVN Provider driver for Octavia LoadBalancer =================================================================== OVN provides virtual networking for Open vSwitch and is a component of the Open vSwitch project. This project provides integration between OpenStack Octavia and OVN. 
* Free software: Apache license * Source: https://opendev.org/openstack/ovn-octavia-provider * Bugs: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider * Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss * IRC: #openstack-neutron on OFTC. * Docs: https://docs.openstack.org/ovn-octavia-provider/latest Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ovn-octavia-provider.svg :target: https://governance.openstack.org/tc/reference/tags/index.html * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/ovn-octavia-provider ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/README.rst0000664000175100017510000000203315033037524017464 0ustar00mylesmyles=================================================================== ovn-octavia-provider - OVN Provider driver for Octavia LoadBalancer =================================================================== OVN provides virtual networking for Open vSwitch and is a component of the Open vSwitch project. This project provides integration between OpenStack Octavia and OVN. * Free software: Apache license * Source: https://opendev.org/openstack/ovn-octavia-provider * Bugs: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider * Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss * IRC: #openstack-neutron on OFTC. * Docs: https://docs.openstack.org/ovn-octavia-provider/latest Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ovn-octavia-provider.svg :target: https://governance.openstack.org/tc/reference/tags/index.html * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/ovn-octavia-provider ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924565.0 ovn_octavia_provider-8.1.0.dev15/RELEASENOTES.rst0000664000175100017510000005020215033037525020422 0ustar00mylesmyles==================== ovn-octavia-provider ==================== .. _ovn-octavia-provider_8.0.0-10: 8.0.0-10 ======== .. _ovn-octavia-provider_8.0.0-10_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml @ b'7a17cd1ae311a88a9db2ee75413b115100dbf012' - [`bug 2110488 `_] Fixed wrong endpoint information in Neutron client configuration. .. _ovn-octavia-provider_0.1.3-24: 0.1.3-24 ======== .. _ovn-octavia-provider_0.1.3-24_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml @ b'bf86846205718f652457aec1943e4c53334dd2a6' - Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. .. _ovn-octavia-provider_0.1.0: 0.1.0 ===== .. _ovn-octavia-provider_0.1.0_Prelude: Prelude ------- .. releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml @ b'184f629f17a0ddba55d502c95f1493930d599677' OVN Octavia provider driver has been created from the networking-ovn repository. .. _ovn-octavia-provider_0.1.0_Upgrade Notes: Upgrade Notes ------------- .. 
releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml @ b'184f629f17a0ddba55d502c95f1493930d599677' - OVN Octavia Provider driver registers under the same entry point. There is no action to be done from operator side. .. _ovn-octavia-provider_0.1.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml @ b'15260b7439d6b60530f8e728674f07470bc16c42' - OVN Octavia provider driver now supports both TCP and UDP pool/listener protocols configured in the same Octavia Load Balancer. .. _ovn-octavia-provider_0.4.1-23: 0.4.1-23 ======== .. _ovn-octavia-provider_0.4.1-23_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml @ b'3860ca0159f50bd530c2198a55c50724db02cc0c' - Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. .. _ovn-octavia-provider_1.0.2-25: 1.0.2-25 ======== .. _ovn-octavia-provider_1.0.2-25_Known Issues: Known Issues ------------ .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'9eea0b9c5de0a7a0c8604d08be4d3416965bcf29' - Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). .. _ovn-octavia-provider_1.0.2-25_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'9eea0b9c5de0a7a0c8604d08be4d3416965bcf29' - [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP. .. _ovn-octavia-provider_1.0.1: 1.0.1 ===== .. _ovn-octavia-provider_1.0.1_New Features: New Features ------------ .. releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml @ b'59e09f053e89eee7859e1d384ca95561a45adcdd' - The OVN Octavia provider drvier now supports health monitoring. TCP and UDP Connect health monitors are now supported by the provider driver, when the underlying OVN version supports them. The health monitor uses the OVN distributed DHCP port as the source IP for messages by default, if one doesn't exist then a port will be created on each given subnet. The list of member ports to monitor is updated whenever one is created or deleted. .. _ovn-octavia-provider_1.0.1_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml @ b'08430b535eb56e3758e63f2a97c7e3a383769bee' - Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. .. _ovn-octavia-provider_1.0.0: 1.0.0 ===== .. _ovn-octavia-provider_1.0.0_New Features: New Features ------------ .. releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml @ b'b796f4ebe052ebace29f29c06e8876342a49f26e' - Add support for the SCTP protocol in the OVN provider driver. .. 
_ovn-octavia-provider_1.3.1-12: 1.3.1-12 ======== .. _ovn-octavia-provider_1.3.1-12_Known Issues: Known Issues ------------ .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'8eef0beb1d554f5bfb7426723a830033999fbec5' - Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). .. _ovn-octavia-provider_1.3.1-12_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'8eef0beb1d554f5bfb7426723a830033999fbec5' - [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP. .. _ovn-octavia-provider_1.2.0: 1.2.0 ===== .. _ovn-octavia-provider_1.2.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml @ b'0896d5f4ec77febc939c3e8b33d94f934eddf805' - Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. .. _ovn-octavia-provider_1.1.0: 1.1.0 ===== .. _ovn-octavia-provider_1.1.0_New Features: New Features ------------ .. releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml @ b'8bbd8f1b62d7811433079bff8ce940fbdd041a0c' - The OVN Octavia provider drvier now supports health monitoring. TCP and UDP Connect health monitors are now supported by the provider driver, when the underlying OVN version supports them. The health monitor uses the OVN distributed DHCP port as the source IP for messages by default, if one doesn't exist then a port will be created on each given subnet. The list of member ports to monitor is updated whenever one is created or deleted. .. _ovn-octavia-provider_2.2.0: 2.2.0 ===== .. _ovn-octavia-provider_2.2.0_Known Issues: Known Issues ------------ .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'480bfc34557720e2eccb8a9dd46494ab18dcada0' - Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). .. _ovn-octavia-provider_2.2.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'480bfc34557720e2eccb8a9dd46494ab18dcada0' - [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP. .. _ovn-octavia-provider_2.0.0: 2.0.0 ===== .. _ovn-octavia-provider_2.0.0_Bug Fixes: Bug Fixes --------- .. 
releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml @ b'675ea9c35ef73765828e3db4b636f13465a76596' - Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. .. _ovn-octavia-provider_3.1.2: 3.1.2 ===== .. _ovn-octavia-provider_3.1.2_Known Issues: Known Issues ------------ .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'726fc4528819fa325698af97f15b4b35b4988c78' - Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). .. _ovn-octavia-provider_3.1.2_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'726fc4528819fa325698af97f15b4b35b4988c78' - [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP. .. _ovn-octavia-provider_3.0.0: 3.0.0 ===== .. _ovn-octavia-provider_3.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/drop-python-3-6-and-3-7-e890961ed94c146e.yaml @ b'16978de5286df0ec5923dcca1b2d228af0810e4a' - Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. .. _ovn-octavia-provider_2023.1-eom: 2023.1-eom ========== .. _ovn-octavia-provider_2023.1-eom_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml @ b'ecb8dc1ba6487ee8148513a1e72888b167980dfd' - [`bug 2072754 `_] Fixed maintenance task that was breaking IPv4 load balancers with health monitors. .. _ovn-octavia-provider_4.0.2: 4.0.2 ===== .. _ovn-octavia-provider_4.0.2_Known Issues: Known Issues ------------ .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'e6395bd485c6b061e884ffdc1c9a32cc96f65d7a' - Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). .. _ovn-octavia-provider_4.0.2_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml @ b'3c73ab90c61e3ea57988b60dc0c6d457700919ab' - In order to support the new 'device_owner' for OVN Load Balancer Health Monitor ports this version requires a Neutron version > 23.0.0rc2 .. _ovn-octavia-provider_4.0.2_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'e6395bd485c6b061e884ffdc1c9a32cc96f65d7a' - [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP. .. 
releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml @ b'2f72bc870b01bca6e16246074881145a5223cba5' - A maintenance task process has been added to update the existing OVN LB HM ports to the new behaviour defined. Specifically, the "device_owner" field needs to be updated from network:distributed to ovn-lb-hm:distributed. Additionally, the "device_id" will be populated during update action. .. _ovn-octavia-provider_4.0.2_Other Notes: Other Notes ----------- .. releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml @ b'2f72bc870b01bca6e16246074881145a5223cba5' - A maintenance task thread has been added to work on periodic and one-shot tasks that also allows the future changes to perform the needed upgrades actions. .. _ovn-octavia-provider_2023.2-eol: 2023.2-eol ========== .. _ovn-octavia-provider_2023.2-eol_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml @ b'74645e273fd256aa95188b4bbcb7e96e5435bba7' - In order to support the new 'device_owner' for OVN Load Balancer Health Monitor ports this version requires a Neutron version > 23.0.0rc2 .. _ovn-octavia-provider_2023.2-eol_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml @ b'6823f39399313fbefadc2860956c52268f7f2e77' - [`bug 2072754 `_] Fixed maintenance task that was breaking IPv4 load balancers with health monitors. .. releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml @ b'9a410d1b4206b5fecff81130b10fdb5429e04ba3' - A maintenance task process has been added to update the existing OVN LB HM ports to the new behaviour defined. Specifically, the "device_owner" field needs to be updated from network:distributed to ovn-lb-hm:distributed. Additionally, the "device_id" will be populated during update action. .. _ovn-octavia-provider_2023.2-eol_Other Notes: Other Notes ----------- .. releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml @ b'9a410d1b4206b5fecff81130b10fdb5429e04ba3' - A maintenance task thread has been added to work on periodic and one-shot tasks that also allows the future changes to perform the needed upgrades actions. .. _ovn-octavia-provider_5.0.0: 5.0.0 ===== .. _ovn-octavia-provider_5.0.0_New Features: New Features ------------ .. releasenotes/notes/session-persistence-b409428a8907f542.yaml @ b'382ddb0329f93873e25a55be65bf43000332a21a' - Now the OVN Octavia provider uses the affinity_timeout option of OVN Load Balancers to support pools sessions persistence. It only supports the SOURCE_IP option type. If not timeout is set, by default 360 seconds is set if the session persistence is enabled. .. _ovn-octavia-provider_5.0.0_Known Issues: Known Issues ------------ .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'ebfbd848b1d57a445e4a0336411491afe20afa8f' - Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). .. _ovn-octavia-provider_5.0.0_Upgrade Notes: Upgrade Notes ------------- .. 
releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml @ b'220d8c8581c07839b9afa87f4ffee91a547593d5' - Authentication settings for Neutron should be added directly to the [neutron] section of the configuration now. The exact settings depend on the `auth_type` used. Refer to https://docs.openstack.org/keystoneauth/latest/plugin-options.html for a list of possible options. .. _ovn-octavia-provider_5.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml @ b'220d8c8581c07839b9afa87f4ffee91a547593d5' - As part of the effort to replace the deprecated `python-neutronclient` package in Octavia the following options in the [neutron] section of the Octavia configuration file have been marked as deprecated for removal: `endpoint` is replaced by the `endpoint_override` option, `endpoint_type` is replaced by the `valid_interfaces` option, and `ca_certificates_file` is replaced by the `cafile` option. In a future release `ovn-octavia-provider` will no longer take the authentication settings from the [service_auth] section as a fallback. It will require them to be in the [neutron] section. .. _ovn-octavia-provider_5.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml @ b'ebfbd848b1d57a445e4a0336411491afe20afa8f' - [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP. .. _ovn-octavia-provider_5.0.0_Other Notes: Other Notes ----------- .. releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml @ b'220d8c8581c07839b9afa87f4ffee91a547593d5' - Replaced code that uses the deprecated `python-neutronclient` library with code that uses `openstacksdk` and removed `python-neutronclient` as a dependency. .. _ovn-octavia-provider_6.0.0-15: 6.0.0-15 ======== .. _ovn-octavia-provider_6.0.0-15_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml @ b'8cabfc5b7cf630d72162c0980f77708a65086ea7' - [`bug 2072754 `_] Fixed maintenance task that was breaking IPv4 load balancers with health monitors. .. _ovn-octavia-provider_6.0.0: 6.0.0 ===== .. _ovn-octavia-provider_6.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml @ b'e2dbc59be51597adbc54838fc5367a49b19ad5a2' - In order to support the new 'device_owner' for OVN Load Balancer Health Monitor ports this version requires a Neutron version > 23.0.0rc2 .. _ovn-octavia-provider_6.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml @ b'1661f3815c1518aa2e3f1761e33ec494be31d584' - A maintenance task process has been added to update the existing OVN LB HM ports to the new behaviour defined. Specifically, the "device_owner" field needs to be updated from network:distributed to ovn-lb-hm:distributed. Additionally, the "device_id" will be populated during update action. .. _ovn-octavia-provider_6.0.0_Other Notes: Other Notes ----------- .. releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml @ b'1661f3815c1518aa2e3f1761e33ec494be31d584' - A maintenance task thread has been added to work on periodic and one-shot tasks that also allows the future changes to perform the needed upgrades actions. .. 
_ovn-octavia-provider_7.0.0: 7.0.0 ===== .. _ovn-octavia-provider_7.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml @ b'ae1540bb1a04464c7065e542ec5e981947247f3b' - [`bug 2072754 `_] Fixed maintenance task that was breaking IPv4 load balancers with health monitors. .. _ovn-octavia-provider_8.0.0-4: 8.0.0-4 ======= .. _ovn-octavia-provider_8.0.0-4_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml @ b'0401ccb0f59505258c3f4477910c9c0c70cde620' - [`bug 2110488 `_] Fixed wrong endpoint information in Neutron client configuration. .. _ovn-octavia-provider_8.0.0: 8.0.0 ===== .. _ovn-octavia-provider_8.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-py38-4240ec2f24969054.yaml @ b'edc01ca672c2d8dd7fd04b04ff5c7643df9e7ecb' - Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/bindep.txt0000664000175100017510000000023015033037524017774 0ustar00mylesmyles# This file contains runtime (non-python) dependencies # More info at: http://docs.openstack.org/infra/bindep/readme.html python3-devel [platform:rpm] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/devstack/0000775000175100017510000000000015033037526017605 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/devstack/local.conf.sample0000664000175100017510000001010415033037524023020 0ustar00mylesmyles# # Sample DevStack local.conf. # # This sample file is intended to be used for your typical DevStack environment # that's running all of OpenStack on a single host. # # It will enable the use of OVN as Octavia's Provider driver. # [[local|localrc]] DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password ADMIN_PASSWORD=password SERVICE_PASSWORD=$ADMIN_PASSWORD # Logging # ------- # By default ``stack.sh`` output only goes to the terminal where it runs. It can # be configured to additionally log to a file by setting ``LOGFILE`` to the full # path of the destination log file. A timestamp will be appended to the given name. LOGFILE=$DEST/logs/stack.sh.log # Old log files are automatically removed after 7 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 # Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting # ``LOG_COLOR`` false. #LOG_COLOR=False # Enable OVN Q_AGENT=ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve Q_ML2_TENANT_NETWORK_TYPE="geneve" # Enable OVN services enable_service ovn-northd enable_service ovn-controller enable_service q-ovn-metadata-agent # Use Neutron enable_service q-svc # Disable Neutron agents not used with OVN. disable_service q-agt disable_service q-l3 disable_service q-dhcp disable_service q-meta # Enable services, these services depend on neutron plugin. 
enable_plugin neutron https://opendev.org/openstack/neutron enable_service q-trunk enable_service q-dns #enable_service q-qos # Enable octavia tempest plugin tests enable_plugin octavia-tempest-plugin https://opendev.org/openstack/octavia-tempest-plugin disable_service horizon # Cinder (OpenStack Block Storage) is disabled by default to speed up # DevStack a bit. You may enable it here if you would like to use it. disable_service cinder c-sch c-api c-vol # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated and saved in the file 'ovn-uuid' for re-use in future # DevStack runs. #OVN_UUID= # If using the OVN native layer-3 service, choose a router scheduler to # manage the distribution of router gateways on hypervisors/chassis. # Default value is leastloaded. #OVN_L3_SCHEDULER=leastloaded # The DevStack plugin defaults to using the ovn branch from the official ovs # repo. You can optionally use a different one. For example, you may want to # use the latest patches in blp's ovn branch (and see OVN_BUILD_FROM_SOURCE): #OVN_REPO=https://github.com/blp/ovs-reviews.git #OVN_BRANCH=ovn # NOTE: When specifying the branch, as shown above, you must also enable this! # By default, OVN will be installed from packages. In order to build OVN from # source, set OVN_BUILD_FROM_SOURCE=True #OVN_BUILD_FROM_SOURCE=False # If the admin wants to enable this chassis to host gateway routers for # external connectivity, then set ENABLE_CHASSIS_AS_GW to True. # Then devstack will set ovn-cms-options with enable-chassis-as-gw # in Open_vSwitch table's external_ids column. # If this option is not set on any chassis, all the of them with bridge # mappings configured will be eligible to host a gateway. ENABLE_CHASSIS_AS_GW=True # If you wish to use the provider network for public access to the cloud, # set the following #Q_USE_PROVIDERNET_FOR_PUBLIC=True # Create public bridge OVN_L3_CREATE_PUBLIC_NETWORK=True # This needs to be equalized with Neutron devstack PUBLIC_NETWORK_GATEWAY="172.24.4.1" # Octavia configuration OCTAVIA_NODE="api" DISABLE_AMP_IMAGE_BUILD=True enable_plugin barbican https://opendev.org/openstack/barbican enable_plugin octavia https://opendev.org/openstack/octavia enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard LIBS_FROM_GIT+=python-octaviaclient enable_service octavia enable_service o-api enable_service o-hk enable_service o-da disable_service o-cw disable_service o-hm # OVN octavia provider plugin enable_plugin ovn-octavia-provider https://opendev.org/openstack/ovn-octavia-provider [[post-config|$NOVA_CONF]] [scheduler] discover_hosts_in_cells_interval = 2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/devstack/plugin.sh0000775000175100017510000000354415033037524021446 0ustar00mylesmyles#!/usr/bin/env bash # How to connect to ovsdb-server hosting the OVN NB database if is_service_enabled tls-proxy; then OVN_PROTO=ssl else OVN_PROTO=tcp fi OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641} OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642} function _configure_provider_driver { iniset ${OCTAVIA_CONF} api_settings enabled_provider_drivers "${OCTAVIA_PROVIDER_DRIVERS}" iniset ${OCTAVIA_CONF} driver_agent enabled_provider_agents ${OCTAVIA_PROVIDER_AGENTS} iniset ${OCTAVIA_CONF} ovn ovn_nb_connection "$OVN_NB_REMOTE" iniset ${OCTAVIA_CONF} ovn ovn_sb_connection "$OVN_SB_REMOTE" if is_service_enabled tls-proxy; then iniset 
${OCTAVIA_CONF} ovn ovn_nb_connection "$OVN_NB_REMOTE" iniset ${OCTAVIA_CONF} ovn ovn_nb_ca_cert "$INT_CA_DIR/ca-chain.pem" iniset ${OCTAVIA_CONF} ovn ovn_nb_certificate "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" iniset ${OCTAVIA_CONF} ovn ovn_nb_private_key "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" iniset ${OCTAVIA_CONF} ovn ovn_sb_connection "$OVN_SB_REMOTE" iniset ${OCTAVIA_CONF} ovn ovn_sb_ca_cert "$INT_CA_DIR/ca-chain.pem" iniset ${OCTAVIA_CONF} ovn ovn_sb_certificate "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" iniset ${OCTAVIA_CONF} ovn ovn_sb_private_key "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" fi } function is_ovn_enabled { if [[ $NEUTRON_AGENT == "ovn" || $Q_AGENT == "ovn" ]]; then return 0 fi return 1 } function _install_provider_driver { setup_develop $OVN_OCTAVIA_PROVIDER_DIR } if [[ "$1" == "stack" ]]; then case "$2" in post-config) if is_ovn_enabled; then _configure_provider_driver fi ;; install) if is_ovn_enabled; then _install_provider_driver fi ;; esac fi ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/devstack/settings0000664000175100017510000000057615033037524021376 0ustar00mylesmylesOCTAVIA_DIR=${OCTAVIA_DIR:-"${DEST}/octavia"} OCTAVIA_CONF_DIR=${OCTAVIA_CONF_DIR:-"/etc/octavia"} OCTAVIA_PROVIDER_DRIVERS=${OCTAVIA_PROVIDER_DRIVERS:-"amphora:The Octavia Amphora driver.,octavia:Deprecated alias of the Octavia Amphora driver.,ovn:Octavia OVN driver."} OCTAVIA_PROVIDER_AGENTS=${OCTAVIA_PROVIDER_AGENTS:-"ovn"} OVN_OCTAVIA_PROVIDER_DIR=$DEST/ovn-octavia-provider ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/doc/0000775000175100017510000000000015033037526016546 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/requirements.txt0000664000175100017510000000053315033037524022031 0ustar00mylesmyles# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/doc/source/0000775000175100017510000000000015033037526020046 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/doc/source/_static/0000775000175100017510000000000015033037526021474 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/_static/.placeholder0000664000175100017510000000000015033037524023743 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/doc/source/admin/0000775000175100017510000000000015033037526021136 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/admin/driver.rst0000664000175100017510000005670415033037524023175 0ustar00mylesmyles.. 
_driver:

====================================
OVN as a Provider Driver for Octavia
====================================

Octavia has integrated support for provider drivers where any third party
Load Balancer driver can be integrated with Octavia. Functionality related
to this has been developed in OVN, and OVN is now supported as a provider
driver for Octavia. The OVN Provider driver has a few advantages when used
as a provider driver for Octavia over Amphora, such as:

* OVN can be deployed without VMs, so there is no additional overhead as is
  required currently in Octavia when using the default Amphora driver.

* OVN Load Balancers can be deployed faster than default Load Balancers in
  Octavia (which currently use Amphora) because there is no additional
  deployment requirement.

* Since OVN supports virtual networking for both VMs and containers, OVN as
  a Load Balancer driver can be used successfully with Kuryr Kubernetes[1].

Limitations of the OVN Provider Driver
--------------------------------------

OVN has its own set of limitations when considered as a Load Balancer
driver. These include:

* OVN currently supports TCP, UDP and SCTP, so Layer-7 based load balancing
  is not possible with OVN.

* Currently, the OVN Provider Driver supports a 1:1 protocol mapping between
  Listeners and associated Pools, i.e. a Listener which can handle TCP
  protocols can only be used with pools associated to the TCP protocol.
  Pools handling UDP protocols cannot be linked with TCP based Listeners.
  This limitation will be handled in an upcoming core OVN release.

* IPv6 support is not tested by Tempest.

* Mixed IPv4 and IPv6 members are not supported.

* Only the SOURCE_IP_PORT load balancing algorithm is supported; others like
  ROUND_ROBIN and LEAST_CONNECTIONS are not currently supported.

* OVN supports health checks for TCP and UDP-CONNECT protocols, but not for
  SCTP. Therefore, when configuring a health monitor, you cannot use SCTP as
  the type.

* Due to the nature of the OVN Octavia driver (flows distributed in all the
  nodes) there is no need for some of the Amphora-specific functionality
  that exists only because a VM is created for the load balancing actions.
  As an example, there is no need for flavors (no VM is created), failovers
  (no VM needs to be recovered), or HA (no extra VMs need to be created; in
  the ovn-octavia case the flows are injected in all the nodes, i.e. it is
  HA by default).

Creating an OVN based Load Balancer
-----------------------------------

The OVN provider driver can be tested out on DevStack using the
configuration options in:

.. literalinclude:: ../../../devstack/local.conf.sample

Note that this configuration allows the user to create Load Balancers of
both Amphora and OVN types.
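Besides the ``openstack`` CLI used in the walk-through that follows, the
same create call can be issued from Python through openstacksdk. The
following is a minimal, illustrative sketch and is not taken from this
repository; it assumes openstacksdk is installed and that a ``clouds.yaml``
entry named ``devstack`` points at the deployment (the network name and
poll interval are placeholders)::

    # Minimal sketch (assumptions: openstacksdk installed, clouds.yaml
    # entry "devstack" configured); names and IDs are illustrative only.
    import time

    import openstack

    conn = openstack.connect(cloud='devstack')

    # Ask Octavia for an OVN-backed load balancer instead of an Amphora one.
    lb = conn.load_balancer.create_load_balancer(
        name='lb-ovn-example',
        vip_network_id=conn.network.find_network('public').id,
        provider='ovn')

    # Creation is asynchronous; poll until the provisioning status settles.
    while lb.provisioning_status == 'PENDING_CREATE':
        time.sleep(2)
        lb = conn.load_balancer.get_load_balancer(lb.id)

    print(lb.id, lb.provisioning_status, lb.vip_address)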
Once the DevStack run is complete, the user can create a load balancer in Openstack:: $ openstack loadbalancer create --vip-network-id public --provider ovn +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | admin_state_up | True | | created_at | 2018-12-13T09:08:14 | | description | | | flavor | | | id | 94e7c431-912b-496c-a247-d52875d44ac7 | | listeners | | | name | | | operating_status | OFFLINE | | pools | | | project_id | af820b57868c4864957d523fb32ccfba | | provider | ovn | | provisioning_status | PENDING_CREATE | | updated_at | None | | vip_address | 172.24.4.9 | | vip_network_id | ee97665d-69d0-4995-a275-27855359956a | | vip_port_id | c98e52d0-5965-4b22-8a17-a374f4399193 | | vip_qos_policy_id | None | | vip_subnet_id | 3eed0c05-6527-400e-bb80-df6e59d248f1 | +---------------------+--------------------------------------+ The user can see the different types of loadbalancers with their associated providers as below:: +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+ | id | name | project_id | vip_address | provisioning_status | provider | +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+ | c5f2070c-d51d-46f0-bec6-dd05e7c19370 | | af820b57868c4864957d523fb32ccfba | 172.24.4.10 | ACTIVE | amphora | | 94e7c431-912b-496c-a247-d52875d44ac7 | | af820b57868c4864957d523fb32ccfba | 172.24.4.9 | ACTIVE | ovn | +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+ Now we can see that OVN will show the load balancer in its *loadbalancer* table:: $ ovn-nbctl list load_balancer _uuid : c72de15e-5c2e-4c1b-a21b-8e9a6721193c external_ids : {enabled=True, lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2", ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}", "neutron:vip"="172.24.4.9", "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193"} name : "94e7c431-912b-496c-a247-d52875d44ac7" protocol : tcp vips : {} Next, a Listener can be created for the associated Load Balancer:: $ openstack loadbalancer listener create --protocol TCP --protocol-port / 64015 94e7c431-912b-496c-a247-d52875d44ac7 +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | True | | connection_limit | -1 | | created_at | 2018-12-13T09:14:51 | | default_pool_id | None | | default_tls_container_ref | None | | description | | | id | 21e77cde-854f-4c3e-bd8c-9536ae0443bc | | insert_headers | None | | l7policies | | | loadbalancers | 94e7c431-912b-496c-a247-d52875d44ac7 | | name | | | operating_status | OFFLINE | | project_id | af820b57868c4864957d523fb32ccfba | | protocol | TCP | | protocol_port | 64015 | | provisioning_status | PENDING_CREATE | | sni_container_refs | [] | | timeout_client_data | 50000 | | timeout_member_connect | 5000 | | timeout_member_data | 50000 | | timeout_tcp_inspect | 0 | | updated_at | None | +---------------------------+--------------------------------------+ OVN updates the Listener information in the Load Balancer table:: $ ovn-nbctl list load_balancer _uuid : c72de15e-5c2e-4c1b-a21b-8e9a6721193c external_ids : {enabled=True, "listener_21e77cde-854f-4c3e-bd8c-9536ae0443bc"="64015:", lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2", 
ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}", "neutron:vip"="172.24.4.9", "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193"} name : "94e7c431-912b-496c-a247-d52875d44ac7" protocol : tcp vips : {} Next, a Pool is associated with the Listener:: $ openstack loadbalancer pool create --protocol TCP --lb-algorithm / SOURCE_IP_PORT --listener 21e77cde-854f-4c3e-bd8c-9536ae0443bc +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | admin_state_up | True | | created_at | 2018-12-13T09:21:37 | | description | | | healthmonitor_id | | | id | 898be8a2-5185-4f3b-8658-a56457f595a9 | | lb_algorithm | SOURCE_IP_PORT | | listeners | 21e77cde-854f-4c3e-bd8c-9536ae0443bc | | loadbalancers | 94e7c431-912b-496c-a247-d52875d44ac7 | | members | | | name | | | operating_status | OFFLINE | | project_id | af820b57868c4864957d523fb32ccfba | | protocol | TCP | | provisioning_status | PENDING_CREATE | | session_persistence | None | | updated_at | None | +---------------------+--------------------------------------+ OVN's Load Balancer table is modified as below:: $ ovn-nbctl list load_balancer _uuid : c72de15e-5c2e-4c1b-a21b-8e9a6721193c external_ids : {enabled=True, "listener_21e77cde-854f-4c3e-bd8c-9536ae0443bc"="64015:", lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2", ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}", "neutron:vip"="172.24.4.9", "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193", "pool_898be8a2-5185-4f3b-8658-a56457f595a9"=""} name : "94e7c431-912b-496c-a247-d52875d44ac7" protocol : tcp vips : {} Lastly, when a member is created, OVN's Load Balancer table is complete:: $ openstack loadbalancer member create --address 10.10.10.10 / --protocol-port 63015 898be8a2-5185-4f3b-8658-a56457f595a9 +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | address | 10.10.10.10 | | admin_state_up | True | | created_at | 2018-12-13T09:26:05 | | id | adf55e70-3d50-4e62-99fd-dd77eababb1c | | name | | | operating_status | NO_MONITOR | | project_id | af820b57868c4864957d523fb32ccfba | | protocol_port | 63015 | | provisioning_status | PENDING_CREATE | | subnet_id | None | | updated_at | None | | weight | 1 | | monitor_port | None | | monitor_address | None | | backup | False | +---------------------+--------------------------------------+ $ ovn-nbctl list load_balancer _uuid : c72de15e-5c2e-4c1b-a21b-8e9a6721193c external_ids : {enabled=True, "listener_21e77cde-854f-4c3e-bd8c-9536ae0443bc"="64015:pool_898be8a2-5185-4f3b-8658-a56457f595a9", lr_ref="neutron-3d2a873b-b5b4-4d14-ac24-47a835fd47b2", ls_refs="{\"neutron-ee97665d-69d0-4995-a275-27855359956a\": 1}", "neutron:vip"="172.24.4.9", "neutron:vip_port_id"="c98e52d0-5965-4b22-8a17-a374f4399193", "pool_898be8a2-5185-4f3b-8658-a56457f595a9"="member_adf55e70-3d50-4e62-99fd-dd77eababb1c_10.10.10.10:63015"} name : "94e7c431-912b-496c-a247-d52875d44ac7" protocol : tcp vips : {"172.24.4.9:64015"="10.10.10.10:63015"} Octavia DB to OVN database population -------------------------------------- In case of OVN DB clustering failure and Load Balancer data loss as a result, you can always re-populate data in OVN NB/SB from the information store in Octavia database. With that objective the tool octavia-ovn-db-sync-util was created. 
It is a command-line tool that allows synchronizing the state of Octavia resources (such as Load Balancers, Listeners, Pools, etc.) with the OVN Northbound (NB)/Southbound (SB) database. This is especially useful in situations where: * Inconsistencies have occurred between Octavia and OVN. * The OVN database has been restored or recreated. * A migration or repair of load balancing resources is required. For that, you can execute the following:: (venv) stack@ubuntu2404:~/ovn-octavia-provider$ octavia-ovn-db-sync-util INFO ovn_octavia_provider.cmd.octavia_ovn_db_sync_util [-] OVN Octavia DB sync start. INFO ovn_octavia_provider.driver [-] Starting sync OVN DB with Loadbalancer filter {'provider': 'ovn'} INFO ovn_octavia_provider.driver [-] Starting sync OVN DB with Loadbalancer lb1 DEBUG ovn_octavia_provider.driver [-] OVN loadbalancer 5bcaab92-3f8e-4460-b34d-4437a86909ef not found. Start create process. {{(pid=837681) _ensure_loadbalancer /opt/stack/ovn-octavia-provider/ovn_octavia_provider/driver.py:684}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbCreateCommand(_result=None, table=Load_Balancer, columns={'name': '5bcaab92-3f8e-4460-b34d-4437a86909ef', 'protocol': [], 'external_ids': {'neutron:vip': '192.168.100.188', 'neutron:vip_port_id': 'e60041e8-01e8-459b-956e-a55608eb5255', 'enabled': 'True'}, 'selection_fields': ['ip_src', 'ip_dst', 'tp_src', 'tp_dst']}, row=False) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): LsLbAddCommand(_result=None, switch=000a1a3e-edff-45ad-9241-5ab8894ac0e0, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'ls_refs': '{"neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0": 1}'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): LrLbAddCommand(_result=None, router=f17e58b5-37d2-4daf-a02f-82fb4974f7b8, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): LsLbAddCommand(_result=None, switch=neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=2): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'lr_ref': 'neutron-d2dd599c-76c7-43c1-8383-1bae5593681a'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): 
DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('protocol', 'tcp'),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'listener_30ac9d4e-4fdd-4885-8949-6a2e7355beb2': '80:pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('protocol', 'tcp'),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=2): DbClearCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, column=vips) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=3): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('vips', {}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'enabled': 'True', 'neutron:vip': '192.168.100.188', 'neutron:vip_port_id': 'e60041e8-01e8-459b-956e-a55608eb5255', 'ls_refs': '{"neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0": 1}', 'lr_ref': 'neutron-d2dd599c-76c7-43c1-8383-1bae5593681a', 'listener_30ac9d4e-4fdd-4885-8949-6a2e7355beb2': '80:pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080', 'pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080': ''}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovn_octavia_provider.helper [-] no member status on external_ids: None {{(pid=837681) _find_member_status /opt/stack/ovn-octavia-provider/ovn_octavia_provider/helper.py:2490}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080': 'member_94ceacd8-1a81-4de9-ac0e-18b8e41cf80f_192.168.100.194:80_b97280a1-b19f-4989-a56c-2eb341c23171'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbClearCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, column=vips) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG 
ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=2): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('vips', {'192.168.100.188:80': '192.168.100.194:80'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'ls_refs': '{"neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0": 2}'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): LrLbAddCommand(_result=None, router=f17e58b5-37d2-4daf-a02f-82fb4974f7b8, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): LsLbAddCommand(_result=None, switch=neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129}} DEBUG ovn_octavia_provider.helper [-] no member status on external_ids: None {{(pid=837681) _update_external_ids_member_status /opt/stack/ovn-octavia-provider/ovn_octavia_provider/helper.py:2521}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'neutron:member_status': '{"94ceacd8-1a81-4de9-ac0e-18b8e41cf80f": "NO_MONITOR"}'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovn_octavia_provider.helper [-] Updating status to octavia: {'loadbalancers': [{'id': '5bcaab92-3f8e-4460-b34d-4437a86909ef', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'listeners': [{'id': '30ac9d4e-4fdd-4885-8949-6a2e7355beb2', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'pools': [{'id': '5814b9e6-db7e-425d-a4cf-1cb668ba7080', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'members': [{'id': '94ceacd8-1a81-4de9-ac0e-18b8e41cf80f', 'provisioning_status': 'ACTIVE', 'operating_status': 'NO_MONITOR'}]} {{(pid=837681) _update_status_to_octavia /opt/stack/ovn-octavia-provider/ovn_octavia_provider/helper.py:428}} INFO ovn_octavia_provider.driver [-] Starting sync floating IP for loadbalancer 5bcaab92-3f8e-4460-b34d-4437a86909ef WARNING ovn_octavia_provider.driver [-] Floating IP not found for loadbalancer 5bcaab92-3f8e-4460-b34d-4437a86909ef INFO ovn_octavia_provider.cmd.octavia_ovn_db_sync_util [-] OVN Octavia DB sync finish. 
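Under the hood the console script is a thin wrapper around the provider
driver. The sketch below mirrors what
``ovn_octavia_provider/cmd/octavia_ovn_db_sync_util.py`` in this package
does internally; registering and loading the Octavia/OVN configuration is
elided here and assumed to have been done the way that module does via its
``setup_conf()`` helper::

    # Assumes oslo.config options have already been registered and loaded,
    # as octavia_ovn_db_sync_util.setup_conf() does before this point.
    from ovn_octavia_provider import driver

    ovn_driver = driver.OvnProviderDriver()
    # Only load balancers created with the 'ovn' provider are synchronized.
    ovn_driver.do_sync(provider='ovn')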
[1]: https://docs.openstack.org/kuryr-kubernetes/latest/installation/services.html ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/admin/index.rst0000664000175100017510000000015015033037524022771 0ustar00mylesmyles==================== Administration Guide ==================== .. toctree:: :maxdepth: 1 driver ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/conf.py0000664000175100017510000000534415033037524021351 0ustar00mylesmyles# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'openstackdocstheme', 'oslo_config.sphinxext', 'sphinxcontrib.rsvgconverter', ] # Project cross-reference roles openstackdocs_projects = [ 'neutron', 'octavia', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/ovn-octavia-provider' openstackdocs_pdf_link = True openstackdocs_bug_project = 'neutron' openstackdocs_bug_tag = 'ovn-octavia-provider' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_static_path = ['_static'] html_theme = 'openstackdocs' # Output file base name for HTML help builder. htmlhelp_basename = 'ovn-octavia-providerdoc' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('pdf-index', 'doc-ovn-octavia-provider.tex', 'OVN Octavia Provider Documentation', 'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. 
#intersphinx_mapping = {'http://docs.python.org/': None} ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/doc/source/configuration/0000775000175100017510000000000015033037526022715 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/configuration/config.rst0000664000175100017510000000073215033037524024714 0ustar00mylesmyles======================= Configuration Reference ======================= This section provides a list of all configuration options for OVN Octavia provider. These are auto-generated from OVN Octavia provider code when this documentation is built. Configuration filenames used below are filenames usually used, but there is no restriction on configuration filename and you can use arbitrary file names. .. show-options:: :config-file: etc/oslo-config-generator/ovn.conf ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/configuration/index.rst0000664000175100017510000000016715033037524024560 0ustar00mylesmyles.. _configuring: =================== Configuration Guide =================== .. toctree:: :maxdepth: 1 config ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/doc/source/contributor/0000775000175100017510000000000015033037526022420 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/contributor/index.rst0000664000175100017510000000017515033037524024262 0ustar00mylesmyles========================= Contributor Documentation ========================= .. toctree:: :maxdepth: 2 loadbalancer ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/contributor/loadbalancer.rst0000664000175100017510000006022115033037524025560 0ustar00mylesmyles.. _loadbalancer: ================================== OpenStack LoadBalancer API and OVN ================================== Introduction ------------ Load balancing is essential for enabling simple or automatic delivery scaling and availability since application delivery, scaling and availability are considered vital features of any cloud. Octavia is an open source, operator-scale load balancing solution designed to work with OpenStack. The purpose of this document is to propose a design for how we can use OVN as the backend for OpenStack's LoadBalancer API provided by Octavia. Octavia LoadBalancers Today --------------------------- A Detailed design analysis of Octavia is available here: https://docs.openstack.org/octavia/latest/contributor/design/version0.5/component-design.html Currently, Octavia uses the built-in Amphorae driver to fulfill the Loadbalancing requests in Openstack. Amphorae can be a Virtual machine, container, dedicated hardware, appliance or device that actually performs the task of load balancing in the Octavia system. More specifically, an amphora takes requests from clients on the front-end and distributes these to back-end systems. Amphorae communicates with its controllers over the LoadBalancer's network through a driver interface on the controller. Amphorae needs a placeholder, such as a separate VM/Container for deployment, so that it can handle the LoadBalancer's requests. 
Along with this, it also needs a separate network (termed the
lb-mgmt-network) which handles all Amphorae requests.

Amphorae has the capability to handle L4 (TCP/UDP) as well as L7 (HTTP)
LoadBalancer requests and provides monitoring features using HealthMonitors.

Octavia with OVN
----------------

The OVN native LoadBalancer currently supports L4 protocols, with support
for L7 protocols planned for future releases. It does not need any extra
hardware/VM/Container for deployment, which is a major advantage compared
with Amphorae. Also, it does not need any special network to handle the
LoadBalancer's requests as they are taken care of directly by OpenFlow
rules. And although OVN does not yet support TLS, that support is in
development and, once implemented, can be integrated with Octavia.

The following section details how OVN can be used as an Octavia driver.

Overview of Proposed Approach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The OVN Driver for Octavia runs under the scope of Octavia. The Octavia API
receives and forwards calls to the OVN Driver.

**Step 1** - Creating a LoadBalancer

The Octavia API receives and issues a LoadBalancer creation request on a
network to the OVN Provider driver. The OVN driver creates a LoadBalancer
in the OVN NorthBound DB and asynchronously updates the Octavia DB with the
status response. A VIP port is created in Neutron when the LoadBalancer
creation is complete. The VIP information, however, is not updated in the
NorthBound DB until the Members are associated with the LoadBalancer's
Pool.

**Step 2** - Creating LoadBalancer entities (Pools, Listeners, Members)

Once a LoadBalancer is created by OVN in its NorthBound DB, users can now
create Pools, Listeners and Members associated with the LoadBalancer using
the Octavia API. With the creation of each entity, the LoadBalancer's
*external_ids* column in the NorthBound DB will be updated and the
corresponding logical and OpenFlow rules will be added for handling them.

**Step 3** - LoadBalancer request processing

When a user sends a request to the VIP IP address, the OVN pipeline takes
care of load balancing the VIP request to one of the backend members. More
information about this can be found in the ovn-northd man pages.

OVN LoadBalancer Driver Logic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* On startup: Open and maintain a connection to the OVN Northbound DB
  (using the ovsdbapp library). On first connection, and anytime a
  reconnect happens:

  * Do a full sync.

* Register a callback when a new interface is added or deleted from a
  router or switch. The LogicalSwitchPortUpdateEvent and
  LogicalRouterPortEvent are registered to process these events.

* When a new LoadBalancer L1 is created, create a row in OVN's
  ``Load_Balancer`` table and update its entries for name and network
  references. If the network on which the LoadBalancer is created is
  associated with a router, say R1, then add the router reference to the
  LoadBalancer's *external_ids* and associate the LoadBalancer with the
  router. Also associate the LoadBalancer L1 with all those networks which
  have an interface on the router R1. This is required so that Logical
  Flows for inter-network communication while using the LoadBalancer L1 are
  possible. Also, during this time, a new port is created via Neutron which
  acts as a VIP Port. The information of this new port is not visible in
  OVN's NorthBound DB until a member is added to the LoadBalancer.
* If a new network interface is added to the router R1 described above, all the LoadBalancers on that network are associated with the router R1 and all the LoadBalancers on the router are associated with the new network. * If a network interface is removed from the router R1, then all the LoadBalancers which have been solely created on that network (identified using the *ls_ref* attribute in the LoadBalancer's *external_ids*) are removed from the router. Similarly, those LoadBalancers which are associated with the network but not actually created on that network are removed from the network. * A LoadBalancer can either be deleted with all its children entities using the *cascade* option, or its members/pools/listeners can be individually deleted. When the LoadBalancer is deleted, its references and associations from all networks and routers are removed. This might change in the future once the association of LoadBalancers with networks/routers are changed to *weak* from *strong* [3]. Also the VIP port is deleted when the LoadBalancer is deleted. OVN LoadBalancer at work ~~~~~~~~~~~~~~~~~~~~~~~~ OVN Northbound schema [5] has a table to store LoadBalancers. The table looks like:: "Load_Balancer": { "columns": { "name": {"type": "string"}, "vips": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp"]]}, "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, There is a ``load_balancer`` column in the Logical_Switch table (which corresponds to a Neutron network) as well as the Logical_Router table (which corresponds to a Neutron router) referring back to the 'Load_Balancer' table. The OVN driver updates the OVN Northbound DB. When a LoadBalancer is created, a row in this table is created. When the listeners and members are added, the 'vips' column and the Logical_Switch's ``load_balancer`` column are updated accordingly. The ovn-northd service, which monitors for changes to the OVN Northbound DB, generates OVN logical flows to enable load balancing, and ovn-controller running on each compute node translates the logical flows into actual OpenFlow rules. The status of each entity in the Octavia DB is managed according to [4] Below are a few examples on what happens when LoadBalancer commands are executed and what changes in the Load_Balancer Northbound DB table. 1. Create a LoadBalancer:: $ openstack loadbalancer create --provider ovn --vip-subnet-id=private lb1 $ ovn-nbctl list load_balancer _uuid : 9dd65bae-2501-43f2-b34e-38a9cb7e4251 external_ids : { lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257", ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 1}", neutron:vip="10.0.0.10", neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"} name : "973a201a-8787-4f6e-9b8f-ab9f93c31f44" protocol : [] vips : {} 2. Create a pool:: $ openstack loadbalancer pool create --name p1 --loadbalancer lb1 --protocol TCP --lb-algorithm SOURCE_IP_PORT $ ovn-nbctl list load_balancer _uuid : 9dd65bae-2501-43f2-b34e-38a9cb7e4251 external_ids : { lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257", ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 1}", "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"="", neutron:vip="10.0.0.10", neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"} name : "973a201a-8787-4f6e-9b8f-ab9f93c31f44" protocol : [] vips : {} 3. 
Create a member:: $ openstack loadbalancer member create --address 10.0.0.107 --subnet-id 2d54ec67-c589-473b-bc67-41f3d1331fef --protocol-port 80 p1 $ ovn-nbctl list load_balancer _uuid : 9dd65bae-2501-43f2-b34e-38a9cb7e4251 external_ids : { lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257", ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2}", "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"= "member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80", neutron:vip="10.0.0.10", neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"} name : "973a201a-8787-4f6e-9b8f-ab9f93c31f44" protocol : [] vips : {} 4. Create another member:: $ openstack loadbalancer member create --address 20.0.0.107 --subnet-id c2e2da10-1217-4fe2-837a-1c45da587df7 --protocol-port 80 p1 $ ovn-nbctl list load_balancer _uuid : 9dd65bae-2501-43f2-b34e-38a9cb7e4251 external_ids : { lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257", ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2, \"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef\": 1}", "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"= "member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80, member_d100f2ed-9b55-4083-be78-7f203d095561_20.0.0.107:80", neutron:vip="10.0.0.10", neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"} name : "973a201a-8787-4f6e-9b8f-ab9f93c31f44" protocol : [] vips : {} 5. Create a listener:: $ openstack loadbalancer listener create --name l1 --protocol TCP --protocol-port 82 --default-pool p1 lb1 $ ovn-nbctl list load_balancer _uuid : 9dd65bae-2501-43f2-b34e-38a9cb7e4251 external_ids : { lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257", ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2, \"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef\": 1}", "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"="10.0.0.107:80,20.0.0.107:80", "listener_12345678-2501-43f2-b34e-38a9cb7e4132"= "82:pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9", neutron:vip="10.0.0.10", neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"} name : "973a201a-8787-4f6e-9b8f-ab9f93c31f44" protocol : [] vips : {"10.0.0.10:82"="10.0.0.107:80,20.0.0.107:80"} As explained earlier in the design section: - If a network N1 has a LoadBalancer LB1 associated to it and one of its interfaces is added to a router R1, LB1 is associated with R1 as well. - If a network N2 has a LoadBalancer LB2 and one of its interfaces is added to the router R1, then R1 will have both LoadBalancers LB1 and LB2. N1 and N2 will also have both the LoadBalancers associated to them. However, kindly note that although network N1 would have both LB1 and LB2 LoadBalancers associated with it, only LB1 would be the LoadBalancer which has a direct reference to the network N1, since LB1 was created on N1. This is visible in the ``ls_ref`` key of the ``external_ids`` column in LB1's entry in the ``load_balancer`` table. - If a network N3 is added to the router R1, N3 will also have both LoadBalancers (LB1, LB2) associated to it. - If the interface to network N2 is removed from R1, network N2 will now only have LB2 associated with it. Networks N1 and N3 and router R1 will have LoadBalancer LB1 associated with them. 
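The ``external_ids`` entries shown above follow simple conventions:
``ls_refs`` is a JSON map of logical switch references, and each
``pool_<uuid>`` key holds a comma separated list of
``member_<uuid>_<ip>:<port>`` entries. The snippet below is a small
illustrative sketch, not part of the driver code, that unpacks the sample
values from step 4 above::

    import json

    # Sample values copied from the `ovn-nbctl list load_balancer` output
    # in step 4 above; this helper is illustrative only.
    external_ids = {
        'ls_refs': '{"neutron-2526c68a-5a9e-484c-8e00-0716388f6563": 2, '
                   '"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef": 1}',
        'pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9':
            'member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80,'
            'member_d100f2ed-9b55-4083-be78-7f203d095561_20.0.0.107:80',
    }

    # ls_refs maps a logical switch name to the number of references the
    # load balancer holds on it.
    ls_refs = json.loads(external_ids['ls_refs'])

    # Each pool_<uuid> value is a comma separated list of members; newer
    # releases may append a subnet UUID after the <ip>:<port> part (as in
    # the sync-util output shown elsewhere in these docs), which would
    # simply stay attached to `addr` here.
    members = {}
    for key, value in external_ids.items():
        if not key.startswith('pool_') or not value:
            continue
        for entry in value.split(','):
            _, member_id, addr = entry.split('_', 2)
            members.setdefault(key[len('pool_'):], []).append(
                (member_id, addr))

    print(ls_refs)
    print(members)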
Limitations ----------- The Following actions are not supported by the OVN Provider Driver: - Creating a LoadBalancer/Listener/Pool with an L7 Protocol - Currently only one algorithm is supported for pool management (Source IP Port) - Due to nature of OVN octavia driver (flows distributed in all the nodes) there is no need for some of the amphora specific functionality that is specific to the fact that a VM is created for the load balancing actions. As an example, there is no need for flavors (no VM is created), failovers (no need to recover a VM), or HA (no need to create extra VMs as in the ovn-octavia case the flows are injected in all the nodes, i.e., it is HA by default). Support Matrix -------------- A detailed matrix of the operations supported by OVN Provider driver in Octavia can be found in https://docs.openstack.org/octavia/latest/user/feature-classification/index.html Octavia DB to OVN database population -------------------------------------- In case of OVN DB clustering failure and Load Balancer data loss as a result, you can always re-populate data in OVN NB/SB from the information store in Octavia database. With that objective the tool octavia-ovn-db-sync-util was created. It is a command-line tool that allows synchronizing the state of Octavia resources (such as Load Balancers, Listeners, Pools, etc.) with the OVN Northbound (NB)/Southbound (SB) database. This is especially useful in situations where: - Inconsistencies have occurred between Octavia and OVN. - The OVN database has been restored or recreated. - A migration or repair of load balancing resources is required. For that, you can execute the following:: (venv) stack@ubuntu2404:~/ovn-octavia-provider$ octavia-ovn-db-sync-util INFO ovn_octavia_provider.cmd.octavia_ovn_db_sync_util [-] OVN Octavia DB sync start. INFO ovn_octavia_provider.driver [-] Starting sync OVN DB with Loadbalancer filter {'provider': 'ovn'} INFO ovn_octavia_provider.driver [-] Starting sync OVN DB with Loadbalancer lb1 DEBUG ovn_octavia_provider.driver [-] OVN loadbalancer 5bcaab92-3f8e-4460-b34d-4437a86909ef not found. Start create process. 
{{(pid=837681) _ensure_loadbalancer /opt/stack/ovn-octavia-provider/ovn_octavia_provider/driver.py:684}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbCreateCommand(_result=None, table=Load_Balancer, columns={'name': '5bcaab92-3f8e-4460-b34d-4437a86909ef', 'protocol': [], 'external_ids': {'neutron:vip': '192.168.100.188', 'neutron:vip_port_id': 'e60041e8-01e8-459b-956e-a55608eb5255', 'enabled': 'True'}, 'selection_fields': ['ip_src', 'ip_dst', 'tp_src', 'tp_dst']}, row=False) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): LsLbAddCommand(_result=None, switch=000a1a3e-edff-45ad-9241-5ab8894ac0e0, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'ls_refs': '{"neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0": 1}'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): LrLbAddCommand(_result=None, router=f17e58b5-37d2-4daf-a02f-82fb4974f7b8, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): LsLbAddCommand(_result=None, switch=neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=2): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'lr_ref': 'neutron-d2dd599c-76c7-43c1-8383-1bae5593681a'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('protocol', 'tcp'),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'listener_30ac9d4e-4fdd-4885-8949-6a2e7355beb2': '80:pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('protocol', 
'tcp'),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=2): DbClearCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, column=vips) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=3): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('vips', {}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'enabled': 'True', 'neutron:vip': '192.168.100.188', 'neutron:vip_port_id': 'e60041e8-01e8-459b-956e-a55608eb5255', 'ls_refs': '{"neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0": 1}', 'lr_ref': 'neutron-d2dd599c-76c7-43c1-8383-1bae5593681a', 'listener_30ac9d4e-4fdd-4885-8949-6a2e7355beb2': '80:pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080', 'pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080': ''}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovn_octavia_provider.helper [-] no member status on external_ids: None {{(pid=837681) _find_member_status /opt/stack/ovn-octavia-provider/ovn_octavia_provider/helper.py:2490}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'pool_5814b9e6-db7e-425d-a4cf-1cb668ba7080': 'member_94ceacd8-1a81-4de9-ac0e-18b8e41cf80f_192.168.100.194:80_b97280a1-b19f-4989-a56c-2eb341c23171'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbClearCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, column=vips) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=2): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('vips', {'192.168.100.188:80': '192.168.100.194:80'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'ls_refs': '{"neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0": 2}'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): LrLbAddCommand(_result=None, 
router=f17e58b5-37d2-4daf-a02f-82fb4974f7b8, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): LsLbAddCommand(_result=None, switch=neutron-000a1a3e-edff-45ad-9241-5ab8894ac0e0, lb=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, may_exist=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129}} DEBUG ovn_octavia_provider.helper [-] no member status on external_ids: None {{(pid=837681) _update_external_ids_member_status /opt/stack/ovn-octavia-provider/ovn_octavia_provider/helper.py:2521}} DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Load_Balancer, record=d69e29cd-0069-4d7f-a1ed-08c246bfb3da, col_values=(('external_ids', {'neutron:member_status': '{"94ceacd8-1a81-4de9-ac0e-18b8e41cf80f": "NO_MONITOR"}'}),), if_exists=True) {{(pid=837681) do_commit /opt/stack/ovn-octavia-provider/venv/lib/python3.12/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89}} DEBUG ovn_octavia_provider.helper [-] Updating status to octavia: {'loadbalancers': [{'id': '5bcaab92-3f8e-4460-b34d-4437a86909ef', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'listeners': [{'id': '30ac9d4e-4fdd-4885-8949-6a2e7355beb2', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'pools': [{'id': '5814b9e6-db7e-425d-a4cf-1cb668ba7080', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'members': [{'id': '94ceacd8-1a81-4de9-ac0e-18b8e41cf80f', 'provisioning_status': 'ACTIVE', 'operating_status': 'NO_MONITOR'}]} {{(pid=837681) _update_status_to_octavia /opt/stack/ovn-octavia-provider/ovn_octavia_provider/helper.py:428}} INFO ovn_octavia_provider.driver [-] Starting sync floating IP for loadbalancer 5bcaab92-3f8e-4460-b34d-4437a86909ef WARNING ovn_octavia_provider.driver [-] Floating IP not found for loadbalancer 5bcaab92-3f8e-4460-b34d-4437a86909ef INFO ovn_octavia_provider.cmd.octavia_ovn_db_sync_util [-] OVN Octavia DB sync finish. Other References ---------------- [1] Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/ [2] Octavia Glossary: https://docs.openstack.org/octavia/latest/reference/glossary.html [3] https://github.com/openvswitch/ovs/commit/612f80fa8ebf88dad2e204364c6c02b451dca36c [4] https://docs.openstack.org/api-ref/load-balancer/v2/index.html#status-codes [5] https://github.com/openvswitch/ovs/blob/d1b235d7a6246e00d4afc359071d3b6b3ed244c3/ovn/ovn-nb.ovsschema#L117 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/index.rst0000664000175100017510000000240115033037524021702 0ustar00mylesmyles.. Copyright 2011-2020 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to the OVN Octavia provider driver's documentation! =========================================================== .. We use different index pages for HTML and PDF documents for better TOC. Please ensure to update pdf-index.rst when you update the index below. Contents -------- .. toctree:: :maxdepth: 2 admin/index contributor/index configuration/index Search ------ * :ref:`OVN Octavia provider driver document search `: Search the contents of this document. * `OpenStack wide search `_: Search the wider set of OpenStack documentation, including forums. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/doc/source/pdf-index.rst0000664000175100017510000000133115033037524022452 0ustar00mylesmyles:orphan: .. Copyright 2011- OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. toctree:: :maxdepth: 2 admin/index contributor/index ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5109847 ovn_octavia_provider-8.1.0.dev15/etc/0000775000175100017510000000000015033037526016554 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/etc/octavia/0000775000175100017510000000000015033037526020202 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/etc/octavia/.placeholder0000664000175100017510000000000015033037524022451 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/etc/octavia/conf.d/0000775000175100017510000000000015033037526021351 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/etc/octavia/conf.d/.placeholder0000664000175100017510000000000015033037524023620 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5139847 ovn_octavia_provider-8.1.0.dev15/etc/oslo-config-generator/0000775000175100017510000000000015033037526022757 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/etc/oslo-config-generator/ovn.conf0000664000175100017510000000016015033037524024423 0ustar00mylesmyles[DEFAULT] output_file = etc/octavia/conf.d/ovn.conf.sample wrap_width = 79 namespace = octavia.api.drivers.ovn ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 
mtime=1751924565.5149846 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/0000775000175100017510000000000015033037526022223 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/__init__.py0000664000175100017510000000000015033037524024320 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/agent.py0000664000175100017510000000566015033037524023700 0ustar00mylesmyles# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from ovsdbapp.backend.ovs_idl import connection from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider import event as ovn_event from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider import maintenance from ovn_octavia_provider.ovsdb import impl_idl_ovn LOG = logging.getLogger(__name__) OVN_EVENT_LOCK_NAME = "neutron_ovn_octavia_event_lock" def OvnProviderAgent(exit_event): # NOTE (froyo): Move inside class in order to avoid # the issues on test scope colliding with Neutron # already registered options when this register was # called from outside of the class a soon this module # was imported, also to cover requirement from # OvnProviderHelper and intra references modules ovn_conf.register_opts() helper = ovn_helper.OvnProviderHelper() events = [ovn_event.LogicalRouterPortEvent(helper), ovn_event.LogicalSwitchPortUpdateEvent(helper)] sb_events = [ovn_event.ServiceMonitorUpdateEvent(helper)] # NOTE(mjozefcz): This API is only for handling OVSDB events! ovn_nb_idl_for_events = impl_idl_ovn.OvnNbIdlForLb( event_lock_name=OVN_EVENT_LOCK_NAME) ovn_nb_idl_for_events.notify_handler.watch_events(events) c = connection.Connection(ovn_nb_idl_for_events, ovn_conf.get_ovn_ovsdb_timeout()) c.start() ovn_sb_idl_for_events = impl_idl_ovn.OvnSbIdlForLb( event_lock_name=OVN_EVENT_LOCK_NAME) ovn_sb_idl_for_events.notify_handler.watch_events(sb_events) ovn_sb_idl_for_events.start() # NOTE(froyo): Maintenance task initialization added here # as it will be a long life task managed through the Octavia # driver agent -- unlike the OVNProviderDriver which is a # short life service invocated by Octavia API. 
maintenance_thread = maintenance.MaintenanceThread() maintenance_thread.add_periodics( maintenance.DBInconsistenciesPeriodics()) maintenance_thread.start() LOG.info('OVN provider agent has started.') exit_event.wait() LOG.info('OVN provider agent is exiting.') ovn_nb_idl_for_events.notify_handler.unwatch_events(events) c.stop() ovn_sb_idl_for_events.notify_handler.unwatch_events(sb_events) ovn_sb_idl_for_events.stop() maintenance_thread.stop() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5149846 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/cmd/0000775000175100017510000000000015033037526022766 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/cmd/__init__.py0000664000175100017510000000000015033037524025063 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/cmd/octavia_ovn_db_sync_util.py0000664000175100017510000000341015033037524030402 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider import driver CONF = cfg.CONF LOG = logging.getLogger(__name__) def setup_conf(): conf = cfg.CONF ovn_conf.register_opts() logging.register_options(CONF) try: CONF(project='octavia') except TypeError: LOG.error('Error parsing the configuration values. Please verify.') raise return conf def main(): """Main method for syncing Octavia LBs (OVN provider) with OVN NB DB. This script provides a utility for syncing the OVN Northbound Database with the Octavia database. """ setup_conf() logging.setup(CONF, 'octavia_ovn_db_sync_util') # Method can be call like `octavia-ovn-db-sync-util --debug` LOG.info("OVN Octavia DB sync start.") args = sys.argv[1:] lb_filters = {'provider': 'ovn'} if '--debug' in args: cfg.CONF.set_override('debug', True) args.remove('--debug') else: cfg.CONF.set_override('debug', False) ovn_driver = driver.OvnProviderDriver() ovn_driver.do_sync(**lb_filters) LOG.info("OVN Octavia DB sync finish.") ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5159845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/common/0000775000175100017510000000000015033037526023513 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/common/clients.py0000664000175100017510000001701615033037524025531 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import exceptions as ks_exceptions from keystoneauth1 import loading as ks_loading from octavia_lib.api.drivers import exceptions as driver_exceptions import openstack from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from ovn_octavia_provider.common import constants from ovn_octavia_provider.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class KeystoneSession(): def __init__(self, section=constants.SERVICE_AUTH): self._session = None self._auth = None self.section = section @property def session(self): """Initialize a Keystone session. :return: a Keystone Session object """ if not self._session: self._session = ks_loading.load_session_from_conf_options( cfg.CONF, self.section, auth=self.auth) return self._session @property def auth(self): if not self._auth: try: self._auth = ks_loading.load_auth_from_conf_options( cfg.CONF, self.section) except ks_exceptions.auth_plugins.MissingRequiredOptions as e: if self.section == constants.SERVICE_AUTH: raise e # NOTE(gthiemonge): MissingRequiredOptions is raised: there is # one or more missing auth options in the config file. It may # be due to the migration from python-neutronclient to # openstacksdk. # With neutronclient, most of the auth settings were in # [service_auth] with a few overrides in [neutron], # but with openstacksdk, we have all the auth settings in the # [neutron] section. In order to support smooth upgrades, in # case those options are missing, we override the undefined # options with the existing settings from [service_auth]. # This code should be removed when all the deployment tools set # the correct options in [neutron] # The config options are lazily registered/loaded by keystone, # it means that we cannot get/set them before invoking # 'load_auth_from_conf_options' on 'service_auth'. ks_loading.load_auth_from_conf_options( cfg.CONF, constants.SERVICE_AUTH) config = getattr(cfg.CONF, self.section) for opt in config: # For each option in the [neutron] section, get its setting # location, if the location is 'opt_default' or # 'set_default', it means that the option is not configured # in the config file, it should be replaced with the one # from [service_auth] loc = cfg.CONF.get_location(opt, self.section) if not loc or loc.location in (cfg.Locations.opt_default, cfg.Locations.set_default): if hasattr(cfg.CONF.service_auth, opt): cur_value = getattr(config, opt) value = getattr(cfg.CONF.service_auth, opt) if value != cur_value: log_value = (value if opt != "password" else "") LOG.debug("Overriding [%s].%s with '%s'", self.section, opt, log_value) cfg.CONF.set_override(opt, value, self.section) # Now we can call load_auth_from_conf_options for this specific # service with the newly defined options. 
self._auth = ks_loading.load_auth_from_conf_options( cfg.CONF, self.section) return self._auth class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class NeutronAuth(metaclass=Singleton): def __init__(self): """Create neutron client object.""" try: ksession = KeystoneSession('neutron') kwargs = {'region_name': CONF.neutron.region_name} try: interface = CONF.neutron.valid_interfaces[0] except (TypeError, LookupError): interface = CONF.neutron.valid_interfaces if interface: kwargs['interface'] = interface if CONF.neutron.endpoint_override: kwargs['network_endpoint_override'] = ( CONF.neutron.endpoint_override) if CONF.neutron.endpoint_override.startswith("https"): kwargs['insecure'] = CONF.neutron.insecure kwargs['cacert'] = CONF.neutron.cafile self.network_proxy = openstack.connection.Connection( session=ksession.session, **kwargs).network except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Neutron client.") def get_neutron_client(): try: return NeutronAuth().network_proxy except Exception as e: msg = _('Cannot initialize OpenStackSDK. Exception: %s. ' 'Please verify Neutron service configuration ' 'in Octavia API configuration.') % e raise driver_exceptions.DriverError( operator_fault_string=msg) class OctaviaAuth(metaclass=Singleton): def __init__(self): """Create Octavia client object.""" try: ksession = KeystoneSession() kwargs = {'region_name': CONF.service_auth.region_name} # TODO(ricolin) `interface` option don't take list as option yet. # We can move away from this when openstacksdk no longer depends # on `interface`. try: interface = CONF.service_auth.valid_interfaces[0] except (TypeError, LookupError): interface = CONF.service_auth.valid_interfaces if interface: kwargs['interface'] = interface self.loadbalancer_proxy = openstack.connection.Connection( session=ksession.session, **kwargs).load_balancer except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Octavia client.") def get_octavia_client(): try: return OctaviaAuth().loadbalancer_proxy except Exception as e: msg = _('Cannot initialize OpenStackSDK. Exception: %s. ' 'Please verify service_auth configuration ' 'in Octavia API configuration.') % e raise driver_exceptions.DriverError( operator_fault_string=msg) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/common/config.py0000664000175100017510000002166615033037524025343 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
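# NOTE(sketch): the Singleton metaclass in common/clients.py above is what lets
# repeated get_neutron_client()/get_octavia_client() calls reuse one
# KeystoneSession-backed proxy instead of rebuilding it. A self-contained
# illustration of the same pattern (names below are demo-only):
class _DemoSingleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class _DemoAuth(metaclass=_DemoSingleton):
    def __init__(self):
        # stands in for the expensive session/proxy construction
        self.ready = True


assert _DemoAuth() is _DemoAuth()  # second call returns the cached instance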
from keystoneauth1 import loading as ks_loading from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from ovn_octavia_provider.i18n import _ LOG = logging.getLogger(__name__) ovn_opts = [ cfg.ListOpt('ovn_nb_connection', default=['tcp:127.0.0.1:6641'], item_type=types.String(regex=r'^(tcp|ssl|unix):.+'), help=_('The connection string for the OVN_Northbound OVSDB.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use ssl:IP:PORT for SSL connection. The ' 'ovn_nb_private_key, ovn_nb_certificate and ' 'ovn_nb_ca_cert are mandatory.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.StrOpt('ovn_nb_private_key', default='', help=_('The PEM file with private key for SSL connection to ' 'OVN-NB-DB')), cfg.StrOpt('ovn_nb_certificate', default='', help=_('The PEM file with certificate that certifies the ' 'private key specified in ovn_nb_private_key')), cfg.StrOpt('ovn_nb_ca_cert', default='', help=_('The PEM file with CA certificate that OVN should use to' ' verify certificates presented to it by SSL peers')), cfg.ListOpt('ovn_sb_connection', default=['tcp:127.0.0.1:6642'], item_type=types.String(regex=r'^(tcp|ssl|unix):.+'), help=_('The connection string for the OVN_Southbound OVSDB.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use ssl:IP:PORT for SSL connection. The ' 'ovn_sb_private_key, ovn_sb_certificate and ' 'ovn_sb_ca_cert are mandatory.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.StrOpt('ovn_sb_private_key', default='', help=_('The PEM file with private key for SSL connection to ' 'OVN-SB-DB')), cfg.StrOpt('ovn_sb_certificate', default='', help=_('The PEM file with certificate that certifies the ' 'private key specified in ovn_sb_private_key')), cfg.StrOpt('ovn_sb_ca_cert', default='', help=_('The PEM file with CA certificate that OVN should use to' ' verify certificates presented to it by SSL peers')), cfg.IntOpt('ovsdb_connection_timeout', default=180, help=_('Timeout in seconds for the OVSDB ' 'connection transaction')), cfg.IntOpt('ovsdb_retry_max_interval', default=180, help=_('Max interval in seconds between ' 'each retry to get the OVN NB and SB IDLs')), cfg.IntOpt('ovsdb_probe_interval', min=0, default=60000, help=_('The probe interval in for the OVSDB session in ' 'milliseconds. If this is zero, it disables the ' 'connection keepalive feature. If non-zero the value ' 'will be forced to at least 1000 milliseconds. Defaults ' 'to 60 seconds.')), ] neutron_opts = [ cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.'), deprecated_for_removal=True, deprecated_reason=_('The endpoint_override option defined by ' 'keystoneauth1 is the new name for this ' 'option.'), deprecated_since='Antelope'), cfg.StrOpt('endpoint_type', help=_('Endpoint interface in identity ' 'service to use'), deprecated_for_removal=True, deprecated_reason=_('This option was replaced by the ' 'valid_interfaces option defined by ' 'keystoneauth.'), deprecated_since='Antelope'), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path'), deprecated_for_removal=True, deprecated_reason=_('The cafile option defined by ' 'keystoneauth1 is the new name for this ' 'option.'), deprecated_since='Antelope'), ] def handle_neutron_deprecations(): # Apply neutron deprecated options to their new setting if needed # Basicaly: if the value of the deprecated option is not the default: # * convert it to a valid "new" value if needed # * set it as the default for the new option # Thus [neutron]. 
has a higher precedence than
# [neutron].
    loc = cfg.CONF.get_location('endpoint', 'neutron')
    if loc and loc.location != cfg.Locations.opt_default:
        cfg.CONF.set_default('endpoint_override', cfg.CONF.neutron.endpoint,
                             'neutron')

    loc = cfg.CONF.get_location('endpoint_type', 'neutron')
    if loc and loc.location != cfg.Locations.opt_default:
        endpoint_type = cfg.CONF.neutron.endpoint_type.replace('URL', '')
        cfg.CONF.set_default('valid_interfaces', [endpoint_type], 'neutron')

    loc = cfg.CONF.get_location('ca_certificates_file', 'neutron')
    if loc and loc.location != cfg.Locations.opt_default:
        cfg.CONF.set_default('cafile', cfg.CONF.neutron.ca_certificates_file,
                             'neutron')


def register_opts():
    # NOTE (froyo): do not try to re-register options already registered by
    # Neutron, especially in test scope, as that would raise a
    # DuplicateOptError
    missing_ovn_opts = ovn_opts
    try:
        neutron_registered_opts = [opt for opt in cfg.CONF.ovn]
        missing_ovn_opts = [opt for opt in ovn_opts
                            if opt.name not in neutron_registered_opts]
    except cfg.NoSuchOptError:
        LOG.info('No opts found under group ovn registered by Neutron')

    # Do the same for neutron options that have been already registered by
    # Octavia
    missing_neutron_opts = neutron_opts
    try:
        neutron_registered_opts = [opt for opt in cfg.CONF.neutron]
        missing_neutron_opts = [opt for opt in neutron_opts
                                if opt.name not in neutron_registered_opts]
    except cfg.NoSuchOptError:
        LOG.info('No opts found under group neutron')

    cfg.CONF.register_opts(missing_ovn_opts, group='ovn')
    cfg.CONF.register_opts(missing_neutron_opts, group='neutron')
    ks_loading.register_auth_conf_options(cfg.CONF, 'service_auth')
    ks_loading.register_session_conf_options(cfg.CONF, 'service_auth')
    ks_loading.register_adapter_conf_options(cfg.CONF, 'service_auth',
                                             include_deprecated=False)
    ks_loading.register_auth_conf_options(cfg.CONF, 'neutron')
    ks_loading.register_session_conf_options(cfg.CONF, 'neutron')
    ks_loading.register_adapter_conf_options(cfg.CONF, 'neutron',
                                             include_deprecated=False)

    # Override default auth_type for plugins with the default from
    # service_auth
    auth_type = cfg.CONF.service_auth.auth_type
    cfg.CONF.set_default('auth_type', auth_type, 'neutron')

    handle_neutron_deprecations()


def list_opts():
    return [
        ('ovn', ovn_opts),
        ('neutron', neutron_opts),
    ]


def get_ovn_nb_connection():
    return ','.join(cfg.CONF.ovn.ovn_nb_connection)


def get_ovn_nb_private_key():
    return cfg.CONF.ovn.ovn_nb_private_key


def get_ovn_nb_certificate():
    return cfg.CONF.ovn.ovn_nb_certificate


def get_ovn_nb_ca_cert():
    return cfg.CONF.ovn.ovn_nb_ca_cert


def get_ovn_sb_connection():
    return ','.join(cfg.CONF.ovn.ovn_sb_connection)


def get_ovn_sb_private_key():
    return cfg.CONF.ovn.ovn_sb_private_key


def get_ovn_sb_certificate():
    return cfg.CONF.ovn.ovn_sb_certificate


def get_ovn_sb_ca_cert():
    return cfg.CONF.ovn.ovn_sb_ca_cert


def get_ovn_ovsdb_timeout():
    return cfg.CONF.ovn.ovsdb_connection_timeout


def get_ovn_ovsdb_retry_max_interval():
    return cfg.CONF.ovn.ovsdb_retry_max_interval


def get_ovn_ovsdb_probe_interval():
    return cfg.CONF.ovn.ovsdb_probe_interval
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/common/constants.py0000664000175100017510000001160015033037524026075 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
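# NOTE(sketch): with the options above registered, a deployment carries an
# [ovn] section in the Octavia configuration; the values and file path below
# are illustrative only:
#
#     [ovn]
#     ovn_nb_connection = ssl:192.0.2.10:6641
#     ovn_nb_private_key = /etc/octavia/ovn_nb.key
#     ovn_nb_certificate = /etc/octavia/ovn_nb.crt
#     ovn_nb_ca_cert = /etc/octavia/ovn_nb_ca.crt
#
# which the helpers above then expose as plain values:
from oslo_config import cfg
from ovn_octavia_provider.common import config as ovn_config

ovn_config.register_opts()
cfg.CONF(['--config-file', '/etc/octavia/octavia.conf'], project='octavia')
print(ovn_config.get_ovn_nb_connection())   # e.g. "ssl:192.0.2.10:6641"
print(ovn_config.get_ovn_ovsdb_timeout())   # 180 unless overridden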
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia_lib.common import constants # TODO(mjozefcz): Use those variables from neutron-lib once released. LRP_PREFIX = "lrp-" OVN_NAME_PREFIX = "neutron-" LB_HM_PORT_PREFIX = "ovn-lb-hm-" LB_VIP_PORT_PREFIX = "ovn-lb-vip-" LB_VIP_ADDIT_PORT_PREFIX = "ovn-lb-vip-additional-" OVN_PORT_NAME_EXT_ID_KEY = 'neutron:port_name' OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name' OVN_PORT_FIP_EXT_ID_KEY = 'neutron:port_fip' OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id' OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids' OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name' OVN_PROJECT_EXT_ID_KEY = 'neutron:project_id' OVN_SG_IDS_EXT_ID_KEY = 'neutron:security_group_ids' OVN_DEVICE_OWNER_EXT_ID_KEY = 'neutron:device_owner' OVN_FIP_EXT_ID_KEY = 'neutron:fip_id' OVN_FIP_PORT_EXT_ID_KEY = 'neutron:fip_port_id' OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id' OVN_PORT_CIDR_EXT_ID_KEY = 'neutron:cidrs' OVN_MEMBER_STATUS_KEY = 'neutron:member_status' OVN_ROUTER_IS_EXT_GW = 'neutron:is_ext_gw' # TODO(froyo): Use from neutron-lib once released. OVN_LB_HM_PORT_DISTRIBUTED = 'ovn-lb-hm:distributed' LB_EXT_IDS_LS_REFS_KEY = 'ls_refs' LB_EXT_IDS_LR_REF_KEY = 'lr_ref' LB_EXT_IDS_POOL_PREFIX = 'pool_' LB_EXT_IDS_LISTENER_PREFIX = 'listener_' LB_EXT_IDS_MEMBER_PREFIX = 'member_' LB_EXT_IDS_HM_KEY = 'octavia:healthmonitor' LB_EXT_IDS_HM_POOL_KEY = 'octavia:pool_id' LB_EXT_IDS_HM_VIP = 'octavia:vip' LB_EXT_IDS_HMS_KEY = 'octavia:healthmonitors' # NOTE(froyo):from additional-vips feature we will mantain the old ones for # backward compatibility LB_EXT_IDS_VIP_KEY = 'neutron:vip' LB_EXT_IDS_ADDIT_VIP_KEY = 'neutron:additional_vips' LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id' LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY = 'neutron:additional_vip_port_ids' LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip' LB_EXT_IDS_ADDIT_VIP_FIP_KEY = 'neutron:additional_vip_fips' PORT_FORWARDING_PLUGIN = 'port_forwarding_plugin' # Auth sections SERVICE_AUTH = 'service_auth' # Request type constants REQ_TYPE_LB_CREATE = 'lb_create' REQ_TYPE_LB_DELETE = 'lb_delete' REQ_TYPE_LB_UPDATE = 'lb_update' REQ_TYPE_LISTENER_CREATE = 'listener_create' REQ_TYPE_LISTENER_DELETE = 'listener_delete' REQ_TYPE_LISTENER_UPDATE = 'listener_update' REQ_TYPE_POOL_CREATE = 'pool_create' REQ_TYPE_POOL_DELETE = 'pool_delete' REQ_TYPE_POOL_UPDATE = 'pool_update' REQ_TYPE_MEMBER_CREATE = 'member_create' REQ_TYPE_MEMBER_DELETE = 'member_delete' REQ_TYPE_MEMBER_UPDATE = 'member_update' REQ_TYPE_LB_CREATE_LRP_ASSOC = 'lb_create_lrp_assoc' REQ_TYPE_LB_DELETE_LRP_ASSOC = 'lb_delete_lrp_assoc' REQ_TYPE_HANDLE_VIP_FIP = 'handle_vip_fip' REQ_TYPE_HANDLE_MEMBER_DVR = 'handle_member_dvr' REQ_TYPE_HM_CREATE = 'hm_create' REQ_TYPE_HM_UPDATE = 'hm_update' REQ_TYPE_HM_DELETE = 'hm_delete' REQ_TYPE_HM_UPDATE_EVENT = 'hm_update_event' REQ_TYPE_EXIT = 'exit' # Request information constants REQ_INFO_ACTION_ASSOCIATE = 'associate' REQ_INFO_ACTION_SYNC = 'sync' REQ_INFO_ACTION_DISASSOCIATE = 'disassociate' REQ_INFO_MEMBER_ADDED = 'member_added' REQ_INFO_MEMBER_DELETED = 'member_deleted' # Disabled resources have a ':D' at the end DISABLED_RESOURCE_SUFFIX = 'D' # This driver only supports TCP, UDP and 
SCTP, with a single LB algorithm OVN_NATIVE_LB_PROTOCOLS = [constants.PROTOCOL_TCP, constants.PROTOCOL_UDP, constants.PROTOCOL_SCTP, ] OVN_NATIVE_LB_ALGORITHMS = [constants.LB_ALGORITHM_SOURCE_IP_PORT, ] # This driver only supports UDP Connect and TCP health monitors SUPPORTED_HEALTH_MONITOR_TYPES = [constants.HEALTH_MONITOR_UDP_CONNECT, constants.HEALTH_MONITOR_TCP] # Prepended to exception log messages EXCEPTION_MSG = "Exception occurred during %s" # Used in functional tests LR_REF_KEY_HEADER = 'neutron-' # LB selection fields to represent LB algorithm LB_SELECTION_FIELDS_MAP = { constants.LB_ALGORITHM_SOURCE_IP_PORT: ["ip_dst", "ip_src", "tp_dst", "tp_src"], constants.LB_ALGORITHM_SOURCE_IP: ["ip_src", "ip_dst"], None: ["ip_src", "ip_dst", "tp_src", "tp_dst"], } # HM events status HM_EVENT_MEMBER_PORT_ONLINE = ['online'] HM_EVENT_MEMBER_PORT_OFFLINE = ['offline'] # max timeout for request MAX_TIMEOUT_REQUEST = 5 AFFINITY_TIMEOUT = "affinity_timeout" # This driver only supports SOURCE_IP sesssion persistency option OVN_NATIVE_SESSION_PERSISTENCE = [constants.SESSION_PERSISTENCE_SOURCE_IP] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/common/exceptions.py0000664000175100017510000000243115033037524026244 0ustar00mylesmyles# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from octavia_lib.api.drivers import exceptions as driver_exceptions from ovn_octavia_provider.i18n import _ class RevisionConflict(n_exc.NeutronException): message = _('OVN revision number for %(resource_id)s (type: ' '%(resource_type)s) is equal or higher than the given ' 'resource. Skipping update') class IPVersionsMixingNotSupportedError( driver_exceptions.UnsupportedOptionError): user_fault_string = _('OVN provider does not support mixing IPv4/IPv6 ' 'configuration within the same Load Balancer.') operator_fault_string = user_fault_string ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/common/utils.py0000664000175100017510000000576415033037524025237 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
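# NOTE(sketch): the constants above are consumed as lookup tables by the driver
# and helper; for example the single supported algorithm maps onto the OVN
# load balancer selection fields like this:
from octavia_lib.common import constants as o_const
from ovn_octavia_provider.common import constants as ovn_const

algo = o_const.LB_ALGORITHM_SOURCE_IP_PORT
assert algo in ovn_const.OVN_NATIVE_LB_ALGORITHMS
print(ovn_const.LB_SELECTION_FIELDS_MAP[algo])
# -> ["ip_dst", "ip_src", "tp_dst", "tp_src"]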
import re from oslo_utils import netutils import tenacity from ovn_octavia_provider.common import config from ovn_octavia_provider.common import constants def get_uuid(dynamic_string): # If it exists get the UUID from any string uuid_pattern = r"[\da-fA-F]{8}-(?:[\da-fA-F]{4}-){3}[\da-fA-F]{12}" uuid_match = re.search(uuid_pattern, dynamic_string) if uuid_match: return uuid_match.group() return '' def ovn_uuid(name): # Get the UUID of a neutron OVN entry (neutron-) return name.replace(constants.OVN_NAME_PREFIX, '') def ovn_name(id): # The name of the OVN entry will be neutron- # This is due to the fact that the OVN application checks if the name # is a UUID. If so then there will be no matches. # We prefix the UUID to enable us to use the Neutron UUID when # updating, deleting etc. # To be sure that just one prefix is used, we will check it before # return concatenation. if not id.startswith(constants.OVN_NAME_PREFIX): return constants.OVN_NAME_PREFIX + '%s' % id return id def ovn_lrouter_port_name(id): # The name of the OVN lrouter port entry will be lrp- # This is to distinguish with the name of the connected lswitch patch port, # which is named with neutron port uuid, so that OVS patch ports are # generated properly. The pairing patch port names will be: # - patch-lrp--to- # - patch--to-lrp- # lrp stands for Logical Router Port return constants.LRP_PREFIX + '%s' % id def remove_macs_from_lsp_addresses(addresses): """Remove the mac addreses from the Logical_Switch_Port addresses column. :param addresses: The list of addresses from the Logical_Switch_Port. Example: ["80:fa:5b:06:72:b7 158.36.44.22", "ff:ff:ff:ff:ff:ff 10.0.0.2"] :returns: A list of IP addesses (v4 and v6) """ ip_list = [] for addr in addresses: ip_list.extend([x for x in addr.split() if (netutils.is_valid_ipv4(x) or netutils.is_valid_ipv6(x))]) return ip_list def retry(max_=None): def inner(func): def wrapper(*args, **kwargs): local_max = max_ or config.get_ovn_ovsdb_retry_max_interval() return tenacity.retry( wait=tenacity.wait_exponential(max=local_max), reraise=True)(func)(*args, **kwargs) return wrapper return inner ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/driver.py0000664000175100017510000012615415033037524024077 0ustar00mylesmyles# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr from octavia_lib.api.drivers import data_models as o_datamodels from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.api.drivers import provider_base as driver_base from octavia_lib.common import constants from oslo_log import log as logging from ovsdbapp.backend.ovs_idl import idlutils from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config as ovn_conf # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. 
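# NOTE(sketch, standalone illustration): the helpers in common/utils.py above
# are pure string/IP manipulation; the UUID and address below are made-up
# sample data.
from ovn_octavia_provider.common import utils as ovn_utils

_sample_uuid = 'f47ac10b-58cc-4372-a567-0e02b2c3d479'
assert ovn_utils.ovn_name(_sample_uuid) == 'neutron-' + _sample_uuid
assert ovn_utils.ovn_uuid('neutron-' + _sample_uuid) == _sample_uuid
assert ovn_utils.get_uuid('lrp-' + _sample_uuid) == _sample_uuid
assert ovn_utils.remove_macs_from_lsp_addresses(
    ['80:fa:5b:06:72:b7 158.36.44.22']) == ['158.36.44.22']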
from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.common import exceptions as ovn_exc from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.i18n import _ LOG = logging.getLogger(__name__) class OvnProviderDriver(driver_base.ProviderDriver): def __init__(self): super().__init__() # NOTE (froyo): Move inside init method in order to # avoid the issues on test scope colliding with Neutron # already registered options when this register was # called from outside of the class a soon this module # was imported, also to cover requirement from # OvnProviderHelper and intra references modules ovn_conf.register_opts() self._ovn_helper = ovn_helper.OvnProviderHelper(notifier=False) def __del__(self): self._ovn_helper.shutdown() def _is_health_check_supported(self): return self._ovn_helper.ovn_nbdb_api.is_col_present( 'Load_Balancer', 'health_check') def _check_for_supported_protocols(self, protocol): if protocol not in ovn_const.OVN_NATIVE_LB_PROTOCOLS: msg = _('OVN provider does not support %s protocol') % protocol raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _check_for_supported_algorithms(self, algorithm): if algorithm not in ovn_const.OVN_NATIVE_LB_ALGORITHMS: msg = _('OVN provider does not support %s algorithm') % algorithm raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _check_for_supported_session_persistence(self, session): if (session and session.get("type") not in ovn_const.OVN_NATIVE_SESSION_PERSISTENCE): msg = _('OVN provider does not support %s session persistence. ' 'Only SOURCE_IP type is supported.') % session.type raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _check_for_allowed_cidrs(self, allowed_cidrs): # TODO(haleyb): add support for this if isinstance(allowed_cidrs, o_datamodels.UnsetType): allowed_cidrs = [] if allowed_cidrs: msg = _('OVN provider does not support allowed_cidrs option') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _get_loadbalancer_request_info(self, loadbalancer): admin_state_up = loadbalancer.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': loadbalancer.loadbalancer_id, 'vip_address': loadbalancer.vip_address, 'vip_network_id': loadbalancer.vip_network_id, 'admin_state_up': admin_state_up} if not isinstance(loadbalancer.additional_vips, o_datamodels.UnsetType): request_info[constants.ADDITIONAL_VIPS] = \ loadbalancer.additional_vips return request_info def _get_listener_request_info(self, listener): self._check_for_supported_protocols(listener.protocol) self._check_for_allowed_cidrs(listener.allowed_cidrs) admin_state_up = listener.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': listener.listener_id, 'protocol': listener.protocol, 'loadbalancer_id': listener.loadbalancer_id, 'protocol_port': listener.protocol_port, 'default_pool_id': listener.default_pool_id, 'admin_state_up': admin_state_up} return request_info def _get_pool_request_info(self, pool): self._check_for_supported_protocols(pool.protocol) self._check_for_supported_algorithms(pool.lb_algorithm) admin_state_up = pool.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': pool.pool_id, 'loadbalancer_id': pool.loadbalancer_id, 
'protocol': pool.protocol, 'lb_algorithm': pool.lb_algorithm, 'listener_id': pool.listener_id, 'admin_state_up': admin_state_up} if not isinstance( pool.session_persistence, o_datamodels.UnsetType): self._check_for_supported_session_persistence( pool.session_persistence) request_info['session_persistence'] = pool.session_persistence return request_info def _get_member_request_info(self, member, create=True): # Validate monitoring options if present admin_state_up = None if create: self._check_member_monitor_options(member) if self._ip_version_differs(member): raise ovn_exc.IPVersionsMixingNotSupportedError() admin_state_up = member.admin_state_up subnet_id = member.subnet_id if (isinstance(subnet_id, o_datamodels.UnsetType) or not subnet_id): subnet_id, subnet_cidr = self._ovn_helper._get_subnet_from_pool( member.pool_id) if not (subnet_id and self._ovn_helper._check_ip_in_subnet(member.address, subnet_cidr)): msg = _('Subnet is required, or Loadbalancer associated with ' 'Pool must have a subnet, for Member creation ' 'with OVN Provider Driver if it is not the same as ' 'LB VIP subnet') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': subnet_id} if admin_state_up and create: request_info['admin_state_up'] = admin_state_up return request_info def _get_healthmonitor_request_info(self, healthmonitor): self._validate_hm_support(healthmonitor) admin_state_up = healthmonitor.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': healthmonitor.healthmonitor_id, 'pool_id': healthmonitor.pool_id, 'type': healthmonitor.type, 'interval': healthmonitor.delay, 'timeout': healthmonitor.timeout, 'failure_count': healthmonitor.max_retries_down, 'success_count': healthmonitor.max_retries, 'admin_state_up': admin_state_up} return request_info def loadbalancer_create(self, loadbalancer): request = {'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': self._get_loadbalancer_request_info( loadbalancer)} self._ovn_helper.add_request(request) if not isinstance(loadbalancer.listeners, o_datamodels.UnsetType): for listener in loadbalancer.listeners: self.listener_create(listener) if not isinstance(loadbalancer.pools, o_datamodels.UnsetType): for pool in loadbalancer.pools: self.pool_create(pool) for member in pool.members: if not member.subnet_id: member.subnet_id = loadbalancer.vip_subnet_id self.member_create(member) def loadbalancer_delete(self, loadbalancer, cascade=False): request_info = {'id': loadbalancer.loadbalancer_id, 'cascade': cascade} request = {'type': ovn_const.REQ_TYPE_LB_DELETE, 'info': request_info} self._ovn_helper.add_request(request) def loadbalancer_failover(self, loadbalancer_id): msg = _('OVN provider does not support loadbalancer failover') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): request_info = {'id': new_loadbalancer.loadbalancer_id} if not isinstance( new_loadbalancer.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_loadbalancer.admin_state_up request = {'type': ovn_const.REQ_TYPE_LB_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) # Pool def pool_create(self, pool): 
self._check_for_supported_protocols(pool.protocol) self._check_for_supported_algorithms(pool.lb_algorithm) admin_state_up = pool.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': pool.pool_id, 'loadbalancer_id': pool.loadbalancer_id, 'protocol': pool.protocol, 'lb_algorithm': pool.lb_algorithm, 'listener_id': pool.listener_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': request_info} if not isinstance( pool.session_persistence, o_datamodels.UnsetType): self._check_for_supported_session_persistence( pool.session_persistence) request['info']['session_persistence'] = pool.session_persistence self._ovn_helper.add_request(request) if pool.healthmonitor is not None and not isinstance( pool.healthmonitor, o_datamodels.UnsetType): self.health_monitor_create(pool.healthmonitor) def pool_delete(self, pool): if pool.healthmonitor: self.health_monitor_delete(pool.healthmonitor) for member in pool.members: self.member_delete(member) request_info = {'id': pool.pool_id, 'protocol': pool.protocol, 'loadbalancer_id': pool.loadbalancer_id} request = {'type': ovn_const.REQ_TYPE_POOL_DELETE, 'info': request_info} self._ovn_helper.add_request(request) def pool_update(self, old_pool, new_pool): if not isinstance(new_pool.protocol, o_datamodels.UnsetType): self._check_for_supported_protocols(new_pool.protocol) if not isinstance(new_pool.lb_algorithm, o_datamodels.UnsetType): self._check_for_supported_algorithms(new_pool.lb_algorithm) request_info = {'id': old_pool.pool_id, 'protocol': old_pool.protocol, 'loadbalancer_id': old_pool.loadbalancer_id} if not isinstance(new_pool.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_pool.admin_state_up if not isinstance( new_pool.session_persistence, o_datamodels.UnsetType): self._check_for_supported_session_persistence( new_pool.session_persistence) request_info['session_persistence'] = ( new_pool.session_persistence) request = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) def listener_create(self, listener): self._check_for_supported_protocols(listener.protocol) self._check_for_allowed_cidrs(listener.allowed_cidrs) admin_state_up = listener.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': listener.listener_id, 'protocol': listener.protocol, 'loadbalancer_id': listener.loadbalancer_id, 'protocol_port': listener.protocol_port, 'default_pool_id': listener.default_pool_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': request_info} self._ovn_helper.add_request(request) def listener_delete(self, listener): request_info = {'id': listener.listener_id, 'loadbalancer_id': listener.loadbalancer_id, 'protocol_port': listener.protocol_port, 'protocol': listener.protocol} request = {'type': ovn_const.REQ_TYPE_LISTENER_DELETE, 'info': request_info} self._ovn_helper.add_request(request) def listener_update(self, old_listener, new_listener): self._check_for_allowed_cidrs(new_listener.allowed_cidrs) request_info = {'id': new_listener.listener_id, 'loadbalancer_id': old_listener.loadbalancer_id, 'protocol': old_listener.protocol, 'protocol_port': old_listener.protocol_port} if not isinstance(new_listener.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_listener.admin_state_up if not isinstance(new_listener.default_pool_id, o_datamodels.UnsetType): 
request_info['default_pool_id'] = new_listener.default_pool_id request = {'type': ovn_const.REQ_TYPE_LISTENER_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) # Member def _check_monitor_options(self, member): if (isinstance(member.monitor_address, o_datamodels.UnsetType) and isinstance(member.monitor_port, o_datamodels.UnsetType)): return False if member.monitor_address or member.monitor_port: return True return False def _check_member_monitor_options(self, member): if self._check_monitor_options(member): msg = _('OVN Load Balancer does not support different member ' 'monitor address or port.') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def _ip_version_differs(self, member): _, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(member.pool_id) if not ovn_lb: return False lb_vips = [ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY)] if ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY in ovn_lb.external_ids: lb_vips.extend(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY).split(',')) # NOTE(froyo): Allow mixing member IP version when VIP LB and any # additional vip is also mixing version vip_version = netaddr.IPNetwork(lb_vips[0]).version vips_mixed = any(netaddr.IPNetwork(vip).version != vip_version for vip in lb_vips if vip) if vips_mixed: return False else: return vip_version != (netaddr.IPNetwork(member.address).version) def member_create(self, member): # Validate monitoring options if present self._check_member_monitor_options(member) if self._ip_version_differs(member): raise ovn_exc.IPVersionsMixingNotSupportedError() admin_state_up = member.admin_state_up subnet_id = member.subnet_id if (isinstance(subnet_id, o_datamodels.UnsetType) or not subnet_id): subnet_id, subnet_cidr = self._ovn_helper._get_subnet_from_pool( member.pool_id) if not (subnet_id and self._ovn_helper._check_ip_in_subnet(member.address, subnet_cidr)): msg = _('Subnet is required, or Loadbalancer associated with ' 'Pool must have a subnet, for Member creation ' 'with OVN Provider Driver if it is not the same as ' 'LB VIP subnet') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': subnet_id, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': request_info} self._ovn_helper.add_request(request) # NOTE(mjozefcz): If LB has FIP on VIP # and member has FIP we need to centralize # traffic for member. request_info = {'id': member.member_id, 'address': member.address, 'pool_id': member.pool_id, 'subnet_id': subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} request = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': request_info} self._ovn_helper.add_request(request) def member_delete(self, member): # NOTE(froyo): OVN provider allow to create member without param # subnet_id, in that case the driver search it according to the # pool_id, but it is not propagated to Octavia. In this case, if # the member is deleted, Octavia send the object without subnet_id. 
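# NOTE(sketch): the subnet lookup below mirrors the fallback used in
# member_create() above: when Octavia does not provide a subnet_id, the helper
# derives one from the pool's load balancer via _get_subnet_from_pool() and
# only accepts it when the member address fits the returned CIDR.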
subnet_id = member.subnet_id if (isinstance(subnet_id, o_datamodels.UnsetType) or not subnet_id): subnet_id, subnet_cidr = self._ovn_helper._get_subnet_from_pool( member.pool_id) if not (subnet_id and self._ovn_helper._check_ip_in_subnet(member.address, subnet_cidr)): msg = _('Subnet is required, or Loadbalancer associated with ' 'Pool must have a subnet, for Member deletion if it is' 'with OVN Provider Driver if it is not the same as ' 'LB VIP subnet') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': subnet_id} request = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': request_info} self._ovn_helper.add_request(request) # NOTE(mjozefcz): If LB has FIP on VIP # and member had FIP we can decentralize # the traffic now. request_info = {'id': member.member_id, 'address': member.address, 'pool_id': member.pool_id, 'subnet_id': subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} request = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': request_info} self._ovn_helper.add_request(request) def member_update(self, old_member, new_member): # Validate monitoring options if present self._check_member_monitor_options(new_member) if new_member.address and self._ip_version_differs(new_member): raise ovn_exc.IPVersionsMixingNotSupportedError() request_info = {'id': new_member.member_id, 'address': old_member.address, 'protocol_port': old_member.protocol_port, 'pool_id': old_member.pool_id, 'old_admin_state_up': old_member.admin_state_up} if not isinstance(new_member.admin_state_up, o_datamodels.UnsetType): request_info['admin_state_up'] = new_member.admin_state_up request = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) def member_batch_update(self, pool_id, members): request_list = [] pool_key, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(pool_id) external_ids = copy.deepcopy(ovn_lb.external_ids) pool = external_ids[pool_key] existing_members = pool.split(',') if pool else [] members_to_delete = copy.copy(existing_members) pool_subnet_id = None pool_subnet_cidr = None for member in members: # NOTE(froyo): in order to keep sync with Octavia DB, we raise # not supporting exceptions as soon as posible, considering the # full request as not valid if (self._check_monitor_options(member)): msg = 'OVN provider does not support monitor options' raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) if (member.address and self._ip_version_differs(member)): raise ovn_exc.IPVersionsMixingNotSupportedError() # NOTE(froyo): if subnet_id not provided, lets try to get it # from the member pool_id subnet_id = member.subnet_id if (isinstance(subnet_id, o_datamodels.UnsetType) or not subnet_id): if not pool_subnet_id: pool_subnet_id, pool_subnet_cidr = ( self._ovn_helper._get_subnet_from_pool(pool_id)) if pool_subnet_id: if (self._ovn_helper._check_ip_in_subnet( member.address, pool_subnet_cidr)): member.subnet_id = pool_subnet_id # NOTE(mjozefcz): We need to have subnet_id information. 
if not member.subnet_id: msg = _('Subnet is required, or Loadbalancer associated ' 'with Pool must have a subnet, for Member ' 'batch update with OVN Provider Driver if it is ' 'not the same as LB VIP subnet') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) admin_state_up = member.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True member_info = self._ovn_helper._get_member_info(member) if member_info not in existing_members: req_type = ovn_const.REQ_TYPE_MEMBER_CREATE else: # If member exists in pool, then Update req_type = ovn_const.REQ_TYPE_MEMBER_UPDATE # Remove all updating members so only deleted ones are left members_to_delete.remove(member_info) request_info = {'id': member.member_id, 'address': member.address, 'protocol_port': member.protocol_port, 'pool_id': member.pool_id, 'subnet_id': member.subnet_id, 'admin_state_up': admin_state_up} request = {'type': req_type, 'info': request_info} request_list.append(request) for member in members_to_delete: member_info = member.split('_') member_ip, member_port, subnet_id, member_id = ( self._ovn_helper._extract_member_info(member)[0]) request_info = {'id': member_info[1], 'address': member_ip, 'protocol_port': member_port, 'pool_id': pool_id} if len(member_info) == 4: request_info['subnet_id'] = subnet_id request = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': request_info} request_list.append(request) # NOTE(mjozefcz): If LB has FIP on VIP # and member had FIP we can decentralize # the traffic now. request_info = {'id': member_id, 'address': member_ip, 'pool_id': pool_id, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} if len(member_info) == 4: request_info['subnet_id'] = subnet_id request = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': request_info} request_list.append(request) for request in request_list: self._ovn_helper.add_request(request) def create_vip_port(self, lb_id, project_id, vip_dict, additional_vip_dicts=None): """Create the VIP port of a load balancer :param lb_id: The ID of the load balancer :param project_id: The ID of the project that owns the load balancer :param vip_dict: A dict that contains the provider VIP information ('network_id', 'port_id', 'subnet_id' and/or 'ip_address') :param additional_vip_dicts: An optional list of dicts of additional VIP. An additional VIP dict might contain the 'ip_address', 'network_id', 'port_id' and/or 'subnet_id' of the secondary VIPs. 
:return: a tuple that contains the VIP provider dictionary and a list of additional VIP dictionaries """ try: port, additional_ports = self._ovn_helper.create_vip_port( project_id, lb_id, vip_dict, additional_vip_dicts) vip_dict[constants.VIP_PORT_ID] = port.id vip_dict[constants.VIP_ADDRESS] = ( port['fixed_ips'][0]['ip_address']) additional_vip_port_dict = [] for additional_port in additional_ports: additional_vip_port_dict.append({ 'port_id': additional_port['id'], constants.NETWORK_ID: additional_port[constants.NETWORK_ID], constants.SUBNET_ID: additional_port['fixed_ips'][0]['subnet_id'], 'ip_address': additional_port['fixed_ips'][0]['ip_address'] }) except Exception as e: kwargs = {} for attr in ('details', 'message'): if hasattr(e, attr): value = getattr(e, attr) kwargs = {'user_fault_string': value, 'operator_fault_string': value} break raise driver_exceptions.DriverError( **kwargs) return vip_dict, additional_vip_port_dict def _validate_hm_support(self, hm, action='create'): if not self._is_health_check_supported(): msg = _('OVN Load Balancer supports Health Check provider ' 'from version 2.12. Upgrade OVN in order to use it.') raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) # type is only required for create if action == 'create': if isinstance(hm.type, o_datamodels.UnsetType): msg = _('OVN provider health monitor type not specified.') # seems this should be other than "unsupported"? raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) if hm.type not in ovn_const.SUPPORTED_HEALTH_MONITOR_TYPES: msg = (_('OVN provider does not support %s ' 'health monitor type. Supported types: %s') % (hm.type, ', '.join(ovn_const.SUPPORTED_HEALTH_MONITOR_TYPES))) raise driver_exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) def health_monitor_create(self, healthmonitor): self._validate_hm_support(healthmonitor) admin_state_up = healthmonitor.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': healthmonitor.healthmonitor_id, 'pool_id': healthmonitor.pool_id, 'type': healthmonitor.type, 'interval': healthmonitor.delay, 'timeout': healthmonitor.timeout, 'failure_count': healthmonitor.max_retries_down, 'success_count': healthmonitor.max_retries, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': request_info} self._ovn_helper.add_request(request) def health_monitor_update(self, old_healthmonitor, new_healthmonitor): self._validate_hm_support(new_healthmonitor, action='update') admin_state_up = new_healthmonitor.admin_state_up if isinstance(admin_state_up, o_datamodels.UnsetType): admin_state_up = True request_info = {'id': new_healthmonitor.healthmonitor_id, 'pool_id': old_healthmonitor.pool_id, 'interval': new_healthmonitor.delay, 'timeout': new_healthmonitor.timeout, 'failure_count': new_healthmonitor.max_retries_down, 'success_count': new_healthmonitor.max_retries, 'admin_state_up': admin_state_up} request = {'type': ovn_const.REQ_TYPE_HM_UPDATE, 'info': request_info} self._ovn_helper.add_request(request) def health_monitor_delete(self, healthmonitor): request_info = {'id': healthmonitor.healthmonitor_id, 'pool_id': healthmonitor.pool_id} request = {'type': ovn_const.REQ_TYPE_HM_DELETE, 'info': request_info} self._ovn_helper.add_request(request) def _ensure_loadbalancer(self, loadbalancer): try: ovn_lbs = self._ovn_helper._find_ovn_lbs_with_retry( 
loadbalancer.loadbalancer_id) except idlutils.RowNotFound: LOG.debug(f"OVN loadbalancer {loadbalancer.loadbalancer_id} " "not found. Start create process.") # TODO(froyo): By now just syncing LB, listener and pool only status = self._ovn_helper.lb_create( self._get_loadbalancer_request_info(loadbalancer)) if not isinstance(loadbalancer.listeners, o_datamodels.UnsetType): status[constants.LISTENERS] = [] for listener in loadbalancer.listeners: status_listener = self._ovn_helper.listener_create( self._get_listener_request_info(listener)) status[constants.LISTENERS].append( status_listener[constants.LISTENERS][0]) if not isinstance(loadbalancer.pools, o_datamodels.UnsetType): status[constants.POOLS] = [] for pool in loadbalancer.pools: status_pool = self._ovn_helper.pool_create( self._get_pool_request_info(pool)) status[constants.POOLS].append( status_pool[constants.POOLS][0]) for member in pool.members: status[constants.MEMBERS] = [] if not member.subnet_id: member.subnet_id = loadbalancer.vip_subnet_id status_member = self._ovn_helper.member_create( self._get_member_request_info(member)) status[constants.MEMBERS].append( status_member[constants.MEMBERS][0]) if pool.healthmonitor is not None and not isinstance( pool.healthmonitor, o_datamodels.UnsetType): status[constants.HEALTHMONITORS] = [] lbhcs, ovn_hm_lb = ( self._ovn_helper._find_ovn_lb_from_hm_id( pool.healthmonitor.healthmonitor_id) ) if not lbhcs and ovn_hm_lb is None: status_hm = self._ovn_helper.hm_create( self._get_healthmonitor_request_info( pool.healthmonitor)) status[constants.HEALTHMONITORS].append( status_hm[constants.HEALTHMONITORS][0]) self._ovn_helper._update_status_to_octavia(status) else: # Load Balancer found, check LB and listener/pool/member/hms # related for ovn_lb in ovn_lbs: LOG.debug( f"Sync - Loadbalancer {loadbalancer.loadbalancer_id} " "found checking other entities related") self._ovn_helper.lb_sync( self._get_loadbalancer_request_info(loadbalancer), ovn_lb) # Listener if not isinstance(loadbalancer.listeners, o_datamodels.UnsetType): for listener in loadbalancer.listeners: self._ovn_helper.listener_sync( self._get_listener_request_info(listener), ovn_lb) # Pool if not isinstance(loadbalancer.pools, o_datamodels.UnsetType): for pool in loadbalancer.pools: pool_info = self._get_pool_request_info(pool) self._ovn_helper.pool_sync(pool_info, ovn_lb) ovn_pool_key = self._ovn_helper._get_pool_key( pool_info[constants.ID], is_enabled=pool_info[constants.ADMIN_STATE_UP]) member_ids = [] if not isinstance(pool.members, o_datamodels.UnsetType): for member in pool.members: if not member.subnet_id: member.subnet_id = ( loadbalancer.vip_subnet_id ) self._ovn_helper.member_sync( self._get_member_request_info(member), ovn_lb, ovn_pool_key) member_ids.append(member.member_id) for ovn_mb_info in \ self._ovn_helper._get_members_in_ovn_lb( ovn_lb, ovn_pool_key): # If member ID not in pool member list, # delete it. if ovn_mb_info[3] not in member_ids: LOG.debug( "Start deleting extra member " f"{ovn_mb_info[3]} from pool " "{pool_info[constants.ID]} in OVN." 
) mb_delete_info = { 'id': ovn_mb_info[3], 'subnet_id': ovn_mb_info[2], } self._ovn_helper.member_delete( mb_delete_info) mb_delete_dvr_info = { 'id': ovn_mb_info[3], 'address': ovn_mb_info[0], 'pool_id': pool_info[constants.ID], 'subnet_id': ovn_mb_info[2], 'action': ovn_const.REQ_INFO_MEMBER_DELETED } self._ovn_helper.handle_member_dvr( mb_delete_dvr_info) # Check health monitor if pool.healthmonitor is not None and not isinstance( pool.healthmonitor, o_datamodels.UnsetType): self._ovn_helper.hm_sync( self._get_healthmonitor_request_info( pool.healthmonitor), ovn_lb, ovn_pool_key) # Purge HM self._ovn_helper.hm_purge(loadbalancer.loadbalancer_id) status = self._ovn_helper._get_current_operating_statuses( ovn_lb) self._ovn_helper._update_status_to_octavia(status) def _fip_sync(self, loadbalancer): LOG.info("Starting sync floating IP for loadbalancer " f"{loadbalancer.loadbalancer_id}") if not loadbalancer.vip_port_id or not loadbalancer.vip_network_id: LOG.debug("VIP Port or Network not set for loadbalancer " f"{loadbalancer.loadbalancer_id}, skip FIP sync.") return # Try to get FIP from neutron fips = self._ovn_helper.get_fip_from_vip(loadbalancer) # get FIP from LSP vip_lsp = self._ovn_helper.get_lsp( port_id=loadbalancer.vip_port_id, network_id=loadbalancer.vip_network_id) lsp_fip = vip_lsp.external_ids.get( ovn_const.OVN_PORT_FIP_EXT_ID_KEY) if vip_lsp else None if fips: neutron_fip = fips[0].floating_ip_address if not vip_lsp: LOG.warn( "Logic Switch Port not found for port " f"{loadbalancer.vip_port_id}. " "Skip sync FIP for loadbalancer " f"{loadbalancer.loadbalancer_id}. Please " "run command `neutron-ovn-db-sync-util` " "first to sync OVN DB with Neutron DB.") return if lsp_fip != neutron_fip: LOG.warn( "Floating IP not consistent between Logic Switch " f"Port and Neutron. Found FIP {lsp_fip} " f"in LSP {vip_lsp.name}, but we have {neutron_fip} from " "Neutron. Skip sync FIP for " f"loadbalancer {loadbalancer.loadbalancer_id}. " "Please run command `neutron-ovn-db-sync-util` " "first to sync OVN DB with Neutron DB.") return self._ovn_helper.vip_port_update_handler( vip_lp=vip_lsp, fip=lsp_fip, action=ovn_const.REQ_INFO_ACTION_SYNC) else: LOG.warn("Floating IP not found for loadbalancer " f"{loadbalancer.loadbalancer_id}") if lsp_fip: LOG.warn( "Floating IP not consistent between Logic Switch " f"Port and Neutron. Found FIP {lsp_fip} configured " f"in LSP {vip_lsp.name}, but no FIP configured from " "Neutron. Please run command `neutron-ovn-db-sync-util` " "first to sync OVN DB with Neutron DB.") def do_sync(self, **lb_filters): LOG.info(f"Starting sync OVN DB with Loadbalancer filter {lb_filters}") octavia_client = clients.get_octavia_client() # We can add project_id to lb_filters for lbs to limit the scope. 
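# NOTE(sketch): for example, a deployment-specific run could narrow the scope
# before the lookup below; the key name assumes the Octavia API accepts it as
# a load balancer list filter:
#
#     lb_filters['project_id'] = '<project-uuid>'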
lbs = self._ovn_helper.get_octavia_lbs(octavia_client, **lb_filters) for lb in lbs: LOG.info(f"Starting sync OVN DB with Loadbalancer {lb.name}") provider_lb = ( self._ovn_helper._octavia_driver_lib.get_loadbalancer(lb.id) ) listeners = provider_lb.listeners or [] provider_lb.listeners = [ o_datamodels.Listener.from_dict(listener) for listener in listeners ] if listeners else o_datamodels.Unset pools = provider_lb.pools or [] provider_pools = [] for pool in pools: provider_pool = o_datamodels.Pool.from_dict(pool) # format member provider members = provider_pool.members if not isinstance(members, o_datamodels.UnsetType) and members: provider_pool.members = [ o_datamodels.Member.from_dict(m) for m in members] else: provider_pool.members = o_datamodels.Unset # format healthmonitor provider if not isinstance( provider_pool.healthmonitor, o_datamodels.UnsetType ) and provider_pool.healthmonitor is not None: provider_pool.healthmonitor = \ o_datamodels.HealthMonitor.from_dict( provider_pool.healthmonitor) provider_pools.append(provider_pool) provider_lb.pools = ( provider_pools if provider_pools else o_datamodels.Unset ) self._ensure_loadbalancer(provider_lb) self._fip_sync(provider_lb) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/event.py0000664000175100017510000000677715033037524023735 0ustar00mylesmyles# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from ovsdbapp.backend.ovs_idl import event as row_event # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. 
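# NOTE(sketch, standalone): the do_sync() flow that ends above is what the
# octavia-ovn-db-sync-util console script drives. Invoked programmatically,
# with the Octavia configuration already loaded, it boils down to:
#
#     from ovn_octavia_provider import driver
#
#     ovn_driver = driver.OvnProviderDriver()
#     ovn_driver.do_sync(provider='ovn')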
from ovn_octavia_provider.common import constants as ovn_const LOG = logging.getLogger(__name__) class LogicalRouterPortEvent(row_event.RowEvent): def __init__(self, driver): table = 'Logical_Router_Port' events = (self.ROW_CREATE, self.ROW_DELETE) super().__init__(events, table, None) self.event_name = 'LogicalRouterPortEvent' self.driver = driver def run(self, event, row, old): LOG.debug('LogicalRouterPortEvent logged, ' '%(event)s, %(row)s', {'event': event, 'row': row}) if event == self.ROW_CREATE: self.driver.lb_create_lrp_assoc_handler(row) elif event == self.ROW_DELETE: self.driver.lb_delete_lrp_assoc_handler(row) class LogicalSwitchPortUpdateEvent(row_event.RowEvent): def __init__(self, driver): table = 'Logical_Switch_Port' events = (self.ROW_UPDATE,) super().__init__(events, table, None) self.event_name = 'LogicalSwitchPortUpdateEvent' self.driver = driver def match_fn(self, event, row, old): port_name = row.external_ids.get( ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '') if hasattr(old, 'external_ids') and port_name.startswith( ovn_const.LB_VIP_PORT_PREFIX): return True return False def run(self, event, row, old): LOG.debug('LogicalSwitchPortUpdateEvent logged, ' '%(event)s, %(row)s', {'event': event, 'row': row}) fip_old = old.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY) fip_new = row.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY) if fip_old != fip_new: if fip_old and fip_new is None: action = ovn_const.REQ_INFO_ACTION_DISASSOCIATE fip = fip_old else: action = ovn_const.REQ_INFO_ACTION_ASSOCIATE fip = fip_new self.driver.vip_port_update_handler(row, fip, action) class ServiceMonitorUpdateEvent(row_event.RowEvent): def __init__(self, driver): table = 'Service_Monitor' events = (self.ROW_UPDATE, self.ROW_DELETE) super().__init__(events, table, None) self.event_name = 'ServiceMonitorUpdateEvent' self.driver = driver def run(self, event, row, old): LOG.debug('ServiceMonitorUpdateEvent logged, ' '%(event)s, %(row)s', {'event': event, 'row': row}) if event == self.ROW_DELETE: self.driver.sm_update_event_handler(row, sm_delete_event=True) elif event == self.ROW_UPDATE: self.driver.sm_update_event_handler(row) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5159845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/hacking/0000775000175100017510000000000015033037526023627 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/hacking/__init__.py0000664000175100017510000000000015033037524025724 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/hacking/checks.py0000664000175100017510000001541315033037524025443 0ustar00mylesmyles# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for OVN Octavia provider specific tests. 
OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to ovn_octavia_provider/tests/unit/hacking/test_checks.py """ import re from hacking import core unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b") unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b") filter_match = re.compile(r".*filter\(lambda ") tests_imports_dot = re.compile(r"\bimport[\s]+ovn_octavia_provider.tests\b") tests_imports_from1 = re.compile(r"\bfrom[\s]+ovn_octavia_provider.tests\b") tests_imports_from2 = re.compile( r"\bfrom[\s]+ovn_octavia_provider[\s]+import[\s]+tests\b") no_line_continuation_backslash_re = re.compile(r'.*(\\)\n') import_mock = re.compile(r"\bimport[\s]+mock\b") # noqa: H216 import_from_mock = re.compile(r"\bfrom[\s]+mock[\s]+import\b") @core.flake8ext def check_assert_called_once_with(logical_line, filename): """Try to detect unintended calls of nonexistent mock methods like: assert_called_once assertCalledOnceWith assert_has_called called_once_with N322 """ if 'ovn_octavia_provider/tests/' in filename: if '.assert_called_once_with(' in logical_line: return uncased_line = logical_line.lower().replace('_', '') check_calls = ['.assertcalledonce', '.calledoncewith'] if any(x for x in check_calls if x in uncased_line): msg = ("N322: Possible use of no-op mock method. " "please use assert_called_once_with.") yield (0, msg) if '.asserthascalled' in uncased_line: msg = ("N322: Possible use of no-op mock method. " "please use assert_has_calls.") yield (0, msg) @core.flake8ext def check_asserttruefalse(logical_line, filename): """N328 - Don't use assertEqual(True/False, observed).""" if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) @core.flake8ext def check_assertempty(logical_line, filename): """Enforce using assertEqual parameter ordering in case of empty objects. N330 """ if 'ovn_octavia_provider/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). 
*empty* contains " "{}, [], (), set(), '', \"\"") empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" reg = fr"assertEqual\(([^,]*,\s*)+?{empties}\)\s*$" if re.search(reg, logical_line): yield (0, msg) @core.flake8ext def check_assertisinstance(logical_line, filename): """N331 - Enforce using assertIsInstance.""" if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", logical_line): msg = ("N331: Use assertIsInstance(observed, type) instead " "of assertTrue(isinstance(observed, type))") yield (0, msg) @core.flake8ext def check_assertequal_for_httpcode(logical_line, filename): """N332 - Enforce correct oredering for httpcode in assertEqual.""" msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " "instead of assertEqual(observed_http_code, expected_http_code)") if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", logical_line): yield (0, msg) @core.flake8ext def check_no_imports_from_tests(logical_line, filename): """N343 - Production code must not import from ovn_octavia_provider.tests.* """ msg = ("N343 Production code must not import from " "ovn_octavia_provider.tests.*") if 'ovn_octavia_provider/tests/' in filename: return for regex in tests_imports_dot, tests_imports_from1, tests_imports_from2: if re.match(regex, logical_line): yield (0, msg) @core.flake8ext def check_python3_no_filter(logical_line): """N344 - Use list comprehension instead of filter(lambda).""" msg = ("N344: Use list comprehension instead of " "filter(lambda obj: test(obj), data) on python3.") if filter_match.match(logical_line): yield (0, msg) @core.flake8ext def check_no_import_mock(logical_line, filename, noqa): """N347 - Test code must not import mock library.""" msg = ("N347: Test code must not import mock library") if noqa: return if 'ovn_octavia_provider/tests/' not in filename: return for regex in import_mock, import_from_mock: if re.match(regex, logical_line): yield (0, msg) @core.flake8ext def check_assertcountequal(logical_line, filename): """N348 - Enforce using assertCountEqual.""" msg = ("N348: Use assertCountEqual(expected, observed) " "instead of assertItemsEqual(observed, expected)") if 'ovn_octavia_provider/tests/' in filename: if re.search(r"assertItemsEqual\([^,]*,\s*(,[^,]*)?", logical_line): yield (0, msg) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/helper.py0000664000175100017510000056407215033037524024070 0ustar00mylesmyles# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import atexit import copy import queue import re import threading import netaddr from neutron_lib import constants as n_const from octavia_lib.api.drivers import data_models as o_datamodels from octavia_lib.api.drivers import driver_lib as o_driver_lib from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.common import constants import openstack from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import strutils from ovn_octavia_provider.ovsdb import ovsdb_monitor from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.schema.ovn_northbound import commands as cmd import tenacity from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config as ovn_conf # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.common import utils from ovn_octavia_provider.i18n import _ from ovn_octavia_provider.ovsdb import impl_idl_ovn CONF = cfg.CONF # Gets Octavia Conf as it runs under o-api domain LOG = logging.getLogger(__name__) class OvnProviderHelper(): def __init__(self, notifier=True): self.requests = queue.Queue() self.helper_thread = threading.Thread(target=self.request_handler) self.helper_thread.daemon = True self._octavia_driver_lib = o_driver_lib.DriverLibrary() ovsdb_monitor.check_and_set_ssl_files('OVN_Northbound') self._init_lb_actions() i = impl_idl_ovn.OvnNbIdlForLb(notifier=notifier) c = connection.Connection(i, ovn_conf.get_ovn_ovsdb_timeout()) self.ovn_nbdb_api = impl_idl_ovn.OvsdbNbOvnIdl(c) atexit.register(self.ovn_nbdb_api.ovsdb_connection.stop) self.helper_thread.start() def _init_lb_actions(self): self._lb_request_func_maps = { ovn_const.REQ_TYPE_LB_CREATE: self.lb_create, ovn_const.REQ_TYPE_LB_DELETE: self.lb_delete, ovn_const.REQ_TYPE_LB_UPDATE: self.lb_update, ovn_const.REQ_TYPE_LISTENER_CREATE: self.listener_create, ovn_const.REQ_TYPE_LISTENER_DELETE: self.listener_delete, ovn_const.REQ_TYPE_LISTENER_UPDATE: self.listener_update, ovn_const.REQ_TYPE_POOL_CREATE: self.pool_create, ovn_const.REQ_TYPE_POOL_DELETE: self.pool_delete, ovn_const.REQ_TYPE_POOL_UPDATE: self.pool_update, ovn_const.REQ_TYPE_MEMBER_CREATE: self.member_create, ovn_const.REQ_TYPE_MEMBER_DELETE: self.member_delete, ovn_const.REQ_TYPE_MEMBER_UPDATE: self.member_update, ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc, ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc, ovn_const.REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip, ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr, ovn_const.REQ_TYPE_HM_CREATE: self.hm_create, ovn_const.REQ_TYPE_HM_UPDATE: self.hm_update, ovn_const.REQ_TYPE_HM_DELETE: self.hm_delete, ovn_const.REQ_TYPE_HM_UPDATE_EVENT: self.hm_update_event, } @staticmethod def _is_lb_empty(external_ids): """Check if there is no pool or listener defined.""" return not any(k.startswith('listener') or k.startswith('pool') for k in external_ids) @staticmethod def _delete_disabled_from_status(status): # pylint: disable=multiple-statements d_regex = f':{ovn_const.DISABLED_RESOURCE_SUFFIX}$' return { k: [{c: re.sub(d_regex, '', d) for c, d in i.items()} for i in v] for k, v in status.items()} def shutdown(self): self.requests.put({'type': ovn_const.REQ_TYPE_EXIT}, timeout=ovn_const.MAX_TIMEOUT_REQUEST) @staticmethod def _map_val(row, col, key): # If the row doesnt exist, 
RowNotFound is raised by the _map_val # and is expected to be caught by the caller. try: return getattr(row, col)[key] except KeyError as e: raise idlutils.RowNotFound(table=row._table.name, col=col, match=key) from e def _create_hm_port(self, network_id, subnet_id, project_id): port = {'name': ovn_const.LB_HM_PORT_PREFIX + str(subnet_id), 'network_id': network_id, 'fixed_ips': [{'subnet_id': subnet_id}], 'admin_state_up': True, 'port_security_enabled': False, 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': ovn_const.LB_HM_PORT_PREFIX + str(subnet_id), 'project_id': project_id} neutron_client = clients.get_neutron_client() try: return neutron_client.create_port(**port) except openstack.exceptions.HttpException: # NOTE (froyo): whatever other exception as e.g. Timeout # we should try to ensure no leftover port remains self._clean_up_hm_port(subnet_id) return None def _clean_up_hm_port(self, subnet_id): # Method to delete the hm port created for subnet_id it there isn't any # other health monitor using it neutron_client = clients.get_neutron_client() hm_port_ip = None hm_checks_port = self._neutron_list_ports( neutron_client, name=f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}') # NOTE(froyo): Just to cover the case that we have more than one # hm-port created by a race condition on create_hm_port and we need # to ensure no leftover ports remains for hm_port in hm_checks_port: for fixed_ip in hm_port.fixed_ips: if fixed_ip['subnet_id'] == subnet_id: hm_port_ip = fixed_ip['ip_address'] if hm_port_ip: lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', ('health_check', '!=', [])).execute() for lb in lbs: for k, v in lb.ip_port_mappings.items(): if hm_port_ip in v: return # Not found any other health monitor using the hm port self.delete_port(hm_port.id) def _ensure_hm_ovn_port(self, network_id, subnet_id, project_id): # We will use a dedicated port for this, so we should find the one # related to the network id, if not found, create a new one and use it. neutron_client = clients.get_neutron_client() hm_checks_port = self._neutron_find_port( neutron_client, network_id=network_id, name_or_id=f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}') if hm_checks_port: return hm_checks_port return self._create_hm_port(network_id, subnet_id, project_id) def _get_nw_router_info_on_interface_event(self, lrp): """Get the Router and Network information on an interface event This function is called when a new interface between a router and a network is added or deleted. Input: Logical Router Port row which is coming from LogicalRouterPortEvent. Output: A row from router table and network table matching the router and network for which the event was generated. Exception: RowNotFound exception can be generated. """ router = self.ovn_nbdb_api.lookup( 'Logical_Router', utils.ovn_name(self._map_val( lrp, 'external_ids', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY))) network = self.ovn_nbdb_api.lookup( 'Logical_Switch', self._map_val(lrp, 'external_ids', ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY)) return router, network def _clean_lb_if_empty(self, ovn_lb, lb_id, external_ids): commands = [] lb_to_delete = False if OvnProviderHelper._is_lb_empty(external_ids): # Verify if its only OVN LB defined. If so - leave with # undefined protocol. If there is different for other protocol # remove this one. 
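# In other words: if this is the only remaining OVN LB row for the
# Octavia LB, keep the row but clear its protocol; if rows for other L4
# protocols still exist, this now-empty row is deleted below.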
try: defined_ovn_lbs = self._find_ovn_lbs(lb_id) except idlutils.RowNotFound: defined_ovn_lbs = [] if len(defined_ovn_lbs) == 1: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('protocol', []))) elif len(defined_ovn_lbs) > 1: # Delete the lb. commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid)) lb_to_delete = True return (commands, lb_to_delete) def lb_delete_lrp_assoc_handler(self, row): try: router, network = self._get_nw_router_info_on_interface_event(row) except idlutils.RowNotFound: LOG.debug("Router or network information not found") return request_info = {'network': network, 'router': router} self.add_request({'type': ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC, 'info': request_info}) def lb_delete_lrp_assoc(self, info): # TODO(reedip): When OVS>=2.12, LB can be deleted without removing # Network and Router references as pushed in the patch # https://github.com/openvswitch/ovs/commit # /612f80fa8ebf88dad2e204364c6c02b451dca36c network = info['network'] router = info['router'] # Find all loadbalancers which have a reference with the network nw_lb = self._find_lb_in_ls(network=network) # Find all loadbalancers which have a reference with the router r_lb = set(router.load_balancer) - nw_lb # Delete all LB on N/W from Router for nlb in nw_lb: try: self._update_lb_to_lr_association(nlb, router, delete=True) except idlutils.RowNotFound: LOG.warning("The disassociation of loadbalancer %s to the " "logical router %s failed, trying step by step", nlb.uuid, router.uuid) self._update_lb_to_lr_association_by_step( nlb, router, delete=True) # Delete all LB on Router from N/W for rlb in r_lb: try: self._update_lb_to_ls_association( rlb, network_id=utils.ovn_uuid(network.name), associate=False, update_ls_ref=False) except idlutils.RowNotFound: LOG.warning("The disassociation of loadbalancer %s to the " "logical switch %s failed, just keep going on", rlb.uuid, utils.ovn_uuid(network.name)) pass def lb_create_lrp_assoc_handler(self, row): try: router, network = self._get_nw_router_info_on_interface_event(row) except idlutils.RowNotFound: LOG.debug("Router or network information not found") return request_info = {'network': network, 'router': router, 'is_gw_port': strutils.bool_from_string( row.external_ids.get( ovn_const.OVN_ROUTER_IS_EXT_GW))} self.add_request({'type': ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC, 'info': request_info}) def lb_create_lrp_assoc(self, info): router_lb = set(info['router'].load_balancer) network_lb = set(info['network'].load_balancer) # Add only those lb to routers which are unique to the network for lb in (network_lb - router_lb): try: self._update_lb_to_lr_association(lb, info['router']) except idlutils.RowNotFound: LOG.warning("The association of loadbalancer %s to the " "logical router %s failed, trying step by step", lb.uuid, info['router'].uuid) self._update_lb_to_lr_association_by_step(lb, info['router']) # if lrp port is a gw port, there is no need to re-add the # loadbalancers from the router into the provider network. # This will be already done for loadbalancer created with VIPs on # provider networks. And it should never be True there when the VIPs # are on tenant networks. 
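# Note: is_gw_port was derived in lb_create_lrp_assoc_handler() from the
# LRP external_ids flag ovn_const.OVN_ROUTER_IS_EXT_GW via
# strutils.bool_from_string().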
if info['is_gw_port']: return # Add those lb to the network which are unique to the router for lb in (router_lb - network_lb): try: self._update_lb_to_ls_association( lb, network_id=utils.ovn_uuid(info['network'].name), associate=True, update_ls_ref=False) except idlutils.RowNotFound: LOG.warning("The association of loadbalancer %s to the " "logical switch %s failed, just keep going on", lb.uuid, utils.ovn_uuid(info['network'].name)) pass def vip_port_update_handler(self, vip_lp, fip, action): """Handler for VirtualIP port updates. If a floating ip is associated to a vip port, then networking-ovn sets the fip in the external_ids column of the logical port as: Logical_Switch_Port.external_ids:port_fip = . Then, in the Load_Balancer table for the vip, networking-ovn creates another vip entry for the FIP. If a floating ip is disassociated from the vip, then it deletes the vip entry for the FIP. """ port_name = vip_lp.external_ids.get(ovn_const.OVN_PORT_NAME_EXT_ID_KEY) additional_vip = False if port_name.startswith(ovn_const.LB_VIP_ADDIT_PORT_PREFIX): lb_id = utils.get_uuid(port_name) additional_vip = True else: lb_id = port_name[len(ovn_const.LB_VIP_PORT_PREFIX):] try: ovn_lbs = self._find_ovn_lbs_with_retry(lb_id) except idlutils.RowNotFound: LOG.debug("Loadbalancer %s not found!", lb_id) return # Loop over all defined LBs with given ID, because it is possible # than there is more than one (for more than 1 L4 protocol). neutron_client = clients.get_neutron_client() for lb in ovn_lbs: port = neutron_client.get_port(vip_lp.name) request_info = {'ovn_lb': lb, 'vip_fip': fip, 'vip_related': [], 'additional_vip_fip': additional_vip, 'action': action} if port: request_info['vip_related'] = [ ip['ip_address'] for ip in port.fixed_ips] if action != ovn_const.REQ_INFO_ACTION_SYNC: self.add_request({'type': ovn_const.REQ_TYPE_HANDLE_VIP_FIP, 'info': request_info}) else: self.handle_vip_fip(request_info) def _find_lb_in_ls(self, network): """Find LB associated to a Network using Network information This function retrieves those loadbalancers whose ls_ref column in the OVN northbound database's load_balancer table has the network's name. Though different networks can be associated with a loadbalancer, but ls_ref of a loadbalancer points to the network where it was actually created, and this function tries to retrieve all those loadbalancers created on this network. 
Input : row of type Logical_Switch Output: set of rows of type Load_Balancer or empty set """ return {lb for lb in network.load_balancer if network.name in lb.external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY, [])} def _find_lb_in_table(self, lb, table): return self.ovn_nbdb_api.find_lb_in_table( lb, table).execute(check_error=True) def request_handler(self): while True: try: request = self.requests.get( timeout=ovn_const.MAX_TIMEOUT_REQUEST) except queue.Empty: continue request_type = request['type'] if request_type == ovn_const.REQ_TYPE_EXIT: break request_handler = self._lb_request_func_maps.get(request_type) try: if request_handler: LOG.debug("Handling request %(req)s with info %(info)s", {'req': request_type, 'info': request['info']}) status = request_handler(request['info']) if status: self._update_status_to_octavia(status) self.requests.task_done() except driver_exceptions.UpdateStatusError as e: LOG.error("Error while updating the load balancer status: %s", e.fault_string) # TODO(haleyb): The resource(s) we were updating status for # should be cleaned-up except Exception: # If any unexpected exception happens we don't want the # notify_loop to exit. LOG.exception('Unexpected exception in request_handler') def add_request(self, req): self.requests.put(req, timeout=ovn_const.MAX_TIMEOUT_REQUEST) @tenacity.retry( retry=tenacity.retry_if_exception_type( driver_exceptions.UpdateStatusError), wait=tenacity.wait_exponential(max=75), stop=tenacity.stop_after_attempt(15), reraise=True) def _update_status_to_octavia(self, status): status = OvnProviderHelper._delete_disabled_from_status(status) LOG.debug('Updating status to octavia: %s', status) self._octavia_driver_lib.update_loadbalancer_status(status) @tenacity.retry( retry=tenacity.retry_if_exception_type(idlutils.RowNotFound), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def _find_ovn_lbs_with_retry(self, lb_id, protocol=None): return self._find_ovn_lbs(lb_id, protocol=protocol) @tenacity.retry( retry=tenacity.retry_if_exception_type( openstack.exceptions.HttpException), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def _neutron_list_ports(self, neutron_client, **params): return neutron_client.ports(**params) @tenacity.retry( retry=tenacity.retry_if_exception_type( openstack.exceptions.HttpException), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def _neutron_find_port(self, neutron_client, **params): return neutron_client.find_port(**params) @tenacity.retry( retry=tenacity.retry_if_exception_type( openstack.exceptions.HttpException), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def get_octavia_lbs(self, octavia_client, **params): return octavia_client.load_balancers(**params) def _get_neutron_client(self): try: return clients.get_neutron_client() except driver_exceptions.DriverError as e: LOG.warn(f"Cannot get client from neutron {e}") return None def _get_vip_port_and_subnet_from_lb(self, neutron_client, vip_port_id, vip_net_id, vip_address, subnet_requested=True): try: return self._get_port_from_info( neutron_client, vip_port_id, vip_net_id, vip_address, subnet_requested ) except openstack.exceptions.ResourceNotFound: LOG.warn("Load balancer VIP port and subnet not found.") return None, None except AttributeError: LOG.warn("Load Balancer VIP port missing information.") return None, None def _build_external_ids(self, loadbalancer, port): external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: 
loadbalancer.get( constants.VIP_ADDRESS), ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: loadbalancer.get( constants.VIP_PORT_ID) or port.id, 'enabled': str(loadbalancer.get(constants.ADMIN_STATE_UP)) } if loadbalancer.get(constants.ADDITIONAL_VIPS): addi_vip = ','.join(x['ip_address'] for x in loadbalancer.get( constants.ADDITIONAL_VIPS)) addi_vip_port_id = ','.join(x['port_id'] for x in loadbalancer.get( constants.ADDITIONAL_VIPS)) external_ids.update({ ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: addi_vip, ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY: addi_vip_port_id }) vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) if vip_fip: external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip additional_vip_fip = loadbalancer.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY) if additional_vip_fip: external_ids[ ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = additional_vip_fip lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) if lr_ref: external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref return external_ids def _sync_external_ids(self, ovn_lb, external_ids, commands): is_same = all(ovn_lb.external_ids.get(k) == v for k, v in external_ids.items()) if not is_same: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', external_ids)) ) def _build_selection_fields(self, loadbalancer): lb_algorithm = loadbalancer.get(constants.LB_ALGORITHM, constants.LB_ALGORITHM_SOURCE_IP_PORT) if self._are_selection_fields_supported(): return self._get_selection_keys(lb_algorithm) return None def _sync_selection_fields(self, ovn_lb, selection_fields, commands): if selection_fields and selection_fields != ovn_lb.selection_fields: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('selection_fields', selection_fields)) ) def _sync_lb_associations(self, neutron_client, ovn_lb, port, subnet, loadbalancer): # NOTE(ltomasbo): If the VIP is on a provider network, it does # not need to be associated to its LS network = neutron_client.get_network(port.network_id) if network and not network.provider_physical_network: # NOTE(froyo): This is the association of the lb to the VIP ls # so this is executed right away. For the additional vip ports # this step is not required since all subnets must belong to # the same subnet, so just for the VIP LB port is enough. try: self._update_lb_to_ls_association( ovn_lb, network_id=port.network_id, associate=True, update_ls_ref=True, additional_vips=True, is_sync=True) except idlutils.RowNotFound: LOG.warning("The association of loadbalancer %s to the " "logical switch %s failed, just keep going on", ovn_lb.uuid, utils.ovn_uuid(network.name)) ls_name = utils.ovn_name(subnet.network_id) try: ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) ovn_lr = self._find_lr_of_ls(ovn_ls, subnet.gateway_ip) except Exception as e: LOG.warning("OVN Logical Switch or Logical Router not found: " f"{e}") ovn_lr = None if ovn_lr: self._sync_lb_to_lr_association(ovn_lb, ovn_lr) # NOTE(mjozefcz): In case of LS references where passed - # apply LS to the new LB. That could happend in case we # need another loadbalancer for other L4 protocol. ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) if ls_refs: try: ls_refs = jsonutils.loads(ls_refs) except ValueError: ls_refs = {} for ls in ls_refs: # Skip previously added LS because we don't want # to duplicate. 
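# (Each remaining entry triggers _update_lb_to_ls_association() for its
# network; ls_refs, once JSON-decoded, is assumed to map logical switch
# names built by utils.ovn_name() to reference counts, e.g.
# {'neutron-<net-uuid>': 1} -- illustrative only.)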
if ls == ovn_ls.name: continue self._update_lb_to_ls_association( ovn_lb, network_id=utils.ovn_uuid(ls), associate=True, update_ls_ref=True, is_sync=True) def _sync_lb_to_lr_association(self, ovn_lb, ovn_lr): try: # NOTE(froyo): This is the association of the lb to the # router associated to VIP ls and all ls connected to that # router we try atomically, if it fails we will go step by # step, discarding the associations from lb to a # non-existent ls, but we will demand the association of # lb to lr self._update_lb_to_lr_association(ovn_lb, ovn_lr, is_sync=True) except idlutils.RowNotFound: LOG.warning("The association of loadbalancer %s to the " "logical router %s failed, trying step by " "step", ovn_lb.uuid, ovn_lr.uuid) try: self._update_lb_to_lr_association_by_step(ovn_lb, ovn_lr, is_sync=True) except Exception as e: LOG.exception("Unexpected error during step-by-step " "association of loadbalancer %s to logical " "router %s: %s", ovn_lb.uuid, ovn_lr.uuid, str(e)) def _build_listener_info(self, listener, external_ids): """Build listener key and listener info.""" listener_key = self._get_listener_key( listener.get(constants.ID), is_enabled=listener.get(constants.ADMIN_STATE_UP) ) pool_key = '' if listener.get(constants.DEFAULT_POOL_ID): pool_key = self._get_pool_key( listener.get(constants.DEFAULT_POOL_ID)) external_ids[listener_key] = self._make_listener_key_value( listener[constants.PROTOCOL_PORT], pool_key ) listener_info = {listener_key: external_ids[listener_key]} return listener_key, listener_info def _update_listener_key_if_needed(self, listener_key, listener_info, ovn_lb, commands): """Update listener key on OVN LoadBalancer if needed.""" prev_listener_key_content = ovn_lb.external_ids.get(listener_key, '') if (listener_key not in ovn_lb.external_ids or listener_info.get(listener_key) != prev_listener_key_content): commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', listener_info) ) ) def _update_protocol_if_needed(self, listener, ovn_lb, commands): """Update protocol on OVN LoadBalancer if needed.""" current_protocol = '' if ovn_lb.protocol: current_protocol = ovn_lb.protocol[0].lower() listener_protocol = str(listener.get(constants.PROTOCOL)).lower() if current_protocol != listener_protocol: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('protocol', listener_protocol) ) ) def _prepare_external_ids(self, pool, ovn_lb): """Prepare the updated external_ids for the LoadBalancer.""" external_ids = copy.deepcopy(ovn_lb.external_ids) pool_key = self._get_pool_key( pool[constants.ID], is_enabled=pool[constants.ADMIN_STATE_UP]) external_ids[pool_key] = '' if pool[constants.LISTENER_ID]: self._update_listener_association( pool, ovn_lb, external_ids, pool_key) return external_ids def _update_listener_association(self, pool, ovn_lb, external_ids, pool_key): """Update the listener association in external_ids.""" listener_key = self._get_listener_key(pool[constants.LISTENER_ID]) if listener_key in ovn_lb.external_ids: pool_key_enable = self._get_pool_key(pool[constants.ID], is_enabled=True) pool_key_disable = self._get_pool_key(pool[constants.ID], is_enabled=False) if pool[constants.ID] in external_ids[listener_key]: # Remove existing pool keys before adding the updated key external_ids[listener_key] = ( external_ids[listener_key] .replace(pool_key_disable, '') .replace(pool_key_enable, '') ) external_ids[listener_key] += str(pool_key) def _extract_persistence_timeout(self, pool): """Extract persistence timeout value from the 
pool, if available.""" if pool.get(constants.SESSION_PERSISTENCE): return pool[constants.SESSION_PERSISTENCE].get( constants.PERSISTENCE_TIMEOUT, '360') return None def _add_external_ids_command(self, commands, ovn_lb, external_ids): """Add a command to update the external_ids of the LoadBalancer.""" commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', external_ids)) ) def _add_persistence_timeout_command(self, commands, ovn_lb, persistence_timeout): """Add command to update persistence timeout in LoadBalancer.""" options = copy.deepcopy(ovn_lb.options) options[ovn_const.AFFINITY_TIMEOUT] = str(persistence_timeout) if ovn_lb.options != options: commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('options', options)) ) def _update_pool_data(self, member, pool_key, external_ids): """Update pool data with member information.""" pool_data = None existing_members = external_ids.get(pool_key, "") member_info = self._get_member_info(member) if existing_members: members = existing_members.split(",") if member_info not in members: members.append(member_info) pool_data = {pool_key: ",".join(members)} else: pool_data = {pool_key: member_info} return pool_data def _add_pool_data_command(self, commands, ovn_lb, pool_data): """Add command to update pool data in LoadBalancer.""" if pool_data: commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', pool_data)) ) def _get_related_lr(self, member): """Retrieve the logical router related to the member's subnet.""" neutron_client = clients.get_neutron_client() try: subnet = neutron_client.get_subnet(member[constants.SUBNET_ID]) ls_name = utils.ovn_name(subnet.network_id) ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) return self._find_lr_of_ls(ovn_ls, subnet.gateway_ip) except (idlutils.RowNotFound, openstack.exceptions.ResourceNotFound): return None def _lb_status(self, loadbalancer, provisioning_status, operating_status): """Return status for the LoadBalancer.""" return { constants.LOADBALANCERS: [ { constants.ID: loadbalancer[constants.ID], constants.PROVISIONING_STATUS: provisioning_status, constants.OPERATING_STATUS: operating_status, } ] } def _find_ovn_lbs(self, lb_id, protocol=None): """Find the Loadbalancers in OVN with the given lb_id as its name This function searches for the LoadBalancers whose Name has the pattern passed in lb_id. @param lb_id: LoadBalancer ID provided by Octavia in its API request. Note that OVN saves the above ID in the 'name' column. @type lb_id: str @param protocol: Loadbalancer protocol. @type protocol: str or None if not defined. :returns: LoadBalancer row if protocol specified or list of rows matching the lb_id. :raises: RowNotFound can be generated if the LoadBalancer is not found. """ lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', ('name', '=', lb_id)).execute() if not protocol: if lbs: return lbs raise idlutils.RowNotFound(table='Load_Balancer', col='name', match=lb_id) # If there is only one LB without protocol defined, so # it is 'clean' LB record without any listener. if len(lbs) == 1 and not lbs[0].protocol: return lbs[0] # Search for other lbs. 
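# The comparison below is case-insensitive on purpose: OVN NB stores the
# protocol lowercased, while the caller may pass it uppercased.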
for lb in lbs: if lb.protocol[0].upper() == protocol.upper(): return lb raise idlutils.RowNotFound(table='Load_Balancer', col='name', match=lb_id) def _get_or_create_ovn_lb( self, lb_id, protocol, admin_state_up, lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT): """Find or create ovn lb with given protocol Find the loadbalancer configured with given protocol or create required if not found """ # TODO(mjozefcz): For now we support only one LB algorithm. # As we may extend that in the future we would need to # look here also for lb_algorithm, along with protocol. # Make sure that its lowercase - OVN NBDB stores lowercases # for this field. protocol = protocol.lower() ovn_lbs = self._find_ovn_lbs(lb_id) lbs_with_required_protocol = [ ovn_lb for ovn_lb in ovn_lbs if protocol in ovn_lb.protocol] lbs_with_no_protocol = [ovn_lb for ovn_lb in ovn_lbs if not ovn_lb.protocol] if lbs_with_required_protocol: # We found existing LB with required # protocol, just return it. return lbs_with_required_protocol[0] elif lbs_with_no_protocol: ovn_lb = lbs_with_no_protocol[0] # Set required protocol here. self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('protocol', protocol)).execute(check_error=True) else: # NOTE(mjozefcz): Looks like loadbalancer with given protocol # doesn't exist. Try to add it with required protocol # by copy the existing one data. lb_info = { 'id': lb_id, 'protocol': protocol, constants.LB_ALGORITHM: lb_algorithm, 'vip_address': ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY), 'vip_port_id': ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY), ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY), ovn_const.LB_EXT_IDS_LS_REFS_KEY: ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY), constants.ADDITIONAL_VIPS: self._get_additional_vips_from_loadbalancer_id(lb_id), 'admin_state_up': admin_state_up} # NOTE(mjozefcz): Handle vip_fip info if exists. vip_fip = ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY) if vip_fip: lb_info.update({ovn_const.LB_EXT_IDS_VIP_FIP_KEY: vip_fip}) additional_vip_fip = ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY, None) if additional_vip_fip: lb_info.update({ ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: additional_vip_fip}) self.lb_create(lb_info, protocol=protocol) # Looks like we've just added new LB # or updated exising, empty one. 
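# Re-read the row from the NB DB so the caller gets the LB with its
# protocol now populated (the db_set/lb_create above already executed).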
return self._find_ovn_lbs(lb_id, protocol=protocol) def _find_ovn_lb_with_pool_key(self, pool_key): lbs = self.ovn_nbdb_api.db_list_rows( 'Load_Balancer').execute(check_error=True) for lb in lbs: # Skip load balancers used by port forwarding plugin if lb.external_ids.get(ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) == ( ovn_const.PORT_FORWARDING_PLUGIN): continue if pool_key in lb.external_ids: return lb def _find_ovn_lb_by_pool_id(self, pool_id): pool_key = self._get_pool_key(pool_id) ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) if not ovn_lb: pool_key = self._get_pool_key(pool_id, is_enabled=False) ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) return pool_key, ovn_lb def _check_ip_in_subnet(self, ip, subnet): return (netaddr.IPAddress(ip) in netaddr.IPNetwork(subnet)) def _get_subnet_from_pool(self, pool_id): pool = self._octavia_driver_lib.get_pool(pool_id) if not pool: return None, None lb = self._octavia_driver_lib.get_loadbalancer(pool.loadbalancer_id) if lb and lb.vip_subnet_id: neutron_client = clients.get_neutron_client() try: subnet = neutron_client.get_subnet(lb.vip_subnet_id) vip_subnet_cidr = subnet.cidr except openstack.exceptions.ResourceNotFound: LOG.warning('Subnet %s not found while trying to ' 'fetch its data.', lb.vip_subnet_id) return None, None return lb.vip_subnet_id, vip_subnet_cidr return None, None def _execute_commands(self, commands): if commands: with self.ovn_nbdb_api.transaction(check_error=True) as txn: for command in commands: txn.add(command) @tenacity.retry( retry=tenacity.retry_if_exception_type(idlutils.RowNotFound), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _update_lb_to_ls_association(self, ovn_lb, network_id=None, subnet_id=None, associate=True, update_ls_ref=True, additional_vips=False, is_sync=False): # Note(froyo): Large topologies can change from the time we # list the ls association commands and the execution, retry # if this situation arises. commands = self._get_lb_to_ls_association_commands( ovn_lb, network_id, subnet_id, associate, update_ls_ref, additional_vips, is_sync=is_sync) self._execute_commands(commands) def _get_lb_to_ls_association_commands(self, ovn_lb, network_id=None, subnet_id=None, associate=True, update_ls_ref=True, additional_vips=True, is_sync=False): """Update LB association with Logical Switch This function deals with updating the References of Logical Switch in LB and addition of LB to LS. """ ovn_ls = None commands = [] if not network_id and not subnet_id: return commands if network_id: ls_name = utils.ovn_name(network_id) else: neutron_client = self._get_neutron_client() if not neutron_client: return [] try: subnet = neutron_client.get_subnet(subnet_id) ls_name = utils.ovn_name(subnet.network_id) except openstack.exceptions.ResourceNotFound: LOG.warning('Subnet %s not found while trying to ' 'fetch its data.', subnet_id) ls_name = None skip_ls_lb_actions = False if ls_name: try: ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) except idlutils.RowNotFound: LOG.warning("LogicalSwitch %s could not be found.", ls_name) if associate: LOG.warning('Cannot associate LB %(lb)s to ' 'LS %(ls)s because LS row ' 'not found in OVN NBDB. 
Exiting.', {'ls': ls_name, 'lb': ovn_lb.name}) return commands # if is_sync and LB already in LS_LB, we don't need to call to # ls_lb_add if is_sync and ovn_ls: for ls_lb in ovn_ls.load_balancer: if str(ls_lb.uuid) == str(ovn_lb.uuid): # lb already in ls, skip assocate for sync steps skip_ls_lb_actions = True break ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) if ls_refs: try: ls_refs = jsonutils.loads(ls_refs) except ValueError: ls_refs = {} else: ls_refs = {} if skip_ls_lb_actions: if ls_name not in ls_refs: ls_refs[ls_name] = 1 else: if associate and ls_name: if ls_name in ls_refs: ls_refs[ls_name] += 1 else: ls_refs[ls_name] = 1 # NOTE(froyo): To cover the initial lb to ls association, # where additional vips shall be in the same network as VIP # port, and the ls_ref[vip_network_id] should take them # into account. if additional_vips: addi_vips = ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY, '') if addi_vips: ls_refs[ls_name] += len(addi_vips.split(',')) if ovn_ls: commands.append(self.ovn_nbdb_api.ls_lb_add( ovn_ls.uuid, ovn_lb.uuid, may_exist=True)) else: if ls_name not in ls_refs: if ovn_ls: commands.append(self.ovn_nbdb_api.ls_lb_del( ovn_ls.uuid, ovn_lb.uuid, if_exists=True)) # Nothing else to be done. return commands ref_ct = ls_refs[ls_name] if ref_ct == 1: del ls_refs[ls_name] if ovn_ls: commands.append(self.ovn_nbdb_api.ls_lb_del( ovn_ls.uuid, ovn_lb.uuid, if_exists=True)) else: ls_refs[ls_name] = ref_ct - 1 if update_ls_ref: check_ls_refs = False if is_sync: ovn_ls_refs = ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY, {}) if ovn_ls_refs: try: ovn_ls_refs = jsonutils.loads(ovn_ls_refs) except ValueError: ovn_ls_refs = {} if ovn_ls_refs.keys() == ls_refs.keys(): check_ls_refs = True if not check_ls_refs: ls_refs_dict = { ovn_const.LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps( ls_refs) } commands.append(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', ls_refs_dict))) return commands def _del_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_ref): commands = [] if lr_ref: try: lr_ref = [r for r in [lr.strip() for lr in lr_ref.split(',')] if r != ovn_lr.name] except ValueError: LOG.warning('The loadbalancer %(lb)s is not associated with ' 'the router %(router)s', {'lb': ovn_lb.name, 'router': ovn_lr.name}) if lr_ref: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', {ovn_const.LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)}))) else: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (ovn_const.LB_EXT_IDS_LR_REF_KEY))) commands.append( self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid, if_exists=True)) lb_vip = netaddr.IPNetwork( ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)) for net in self._find_ls_for_lr(ovn_lr, ip_version=lb_vip.version): commands.append(self.ovn_nbdb_api.ls_lb_del( net, ovn_lb.uuid, if_exists=True)) return commands def _add_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_rf, is_sync=False): commands = [] need_lr_sync = False # Check if lb not in lr and needs to be added if is_sync: lr_lbs = [str(lr_lb.uuid) for lr_lb in ovn_lr.load_balancer] if str(ovn_lb.uuid) not in lr_lbs: need_lr_sync = True if not is_sync or need_lr_sync: commands.append( self.ovn_nbdb_api.lr_lb_add(ovn_lr.uuid, ovn_lb.uuid, may_exist=True)) lb_vip = netaddr.IPNetwork( ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)) for net in self._find_ls_for_lr(ovn_lr, ip_version=lb_vip.version): skip_ls_lb_actions = False if is_sync: try: ovn_ls = 
self.ovn_nbdb_api.ls_get(net).execute( check_error=True) for ls_lb in ovn_ls.load_balancer: if str(ls_lb.uuid) == str(ovn_lb.uuid): # lb already in ls, skip assocate for sync steps skip_ls_lb_actions = True except idlutils.RowNotFound: LOG.warning("LogicalSwitch %s could not be found.", net) if not skip_ls_lb_actions: commands.append(self.ovn_nbdb_api.ls_lb_add( net, ovn_lb.uuid, may_exist=True)) if ovn_lr.name not in str(lr_rf): # Multiple routers in lr_rf are separated with ',' if lr_rf: lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: f"{lr_rf},{ovn_lr.name}"} else: lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', lr_rf))) return commands @tenacity.retry( retry=tenacity.retry_if_exception_type(idlutils.RowNotFound), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False, is_sync=False): # Note(froyo): Large topologies can change from the time we # list the ls associated to lr until we execute the # association command, retry if this situation arises. commands = self._get_lb_to_lr_association_commands( ovn_lb, ovn_lr, delete, is_sync=is_sync) self._execute_commands(commands) def _update_lb_to_lr_association_by_step(self, ovn_lb, ovn_lr, delete=False, is_sync=False): # Note(froyo): just to make association commands step by # step, in order to keep going on when LsLbAdd or LsLbDel # happen. commands = self._get_lb_to_lr_association_commands( ovn_lb, ovn_lr, delete, is_sync=is_sync) for command in commands: try: command.execute(check_error=True) except idlutils.RowNotFound: if isinstance(command, (cmd.LsLbAddCommand, cmd.LsLbDelCommand)): LOG.warning('action lb to ls fail because ls ' '%s is not found, keep going on...', getattr(command, 'switch', '')) else: raise def _get_lb_to_lr_association_commands( self, ovn_lb, ovn_lr, delete=False, is_sync=False): lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) if delete: return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) return self._add_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref, is_sync=is_sync) def _find_ls_for_lr(self, router, ip_version): ls = [] for port in router.ports: if port.gateway_chassis or port.ha_chassis_group: continue if netaddr.IPNetwork(port.networks[0]).version != ip_version: continue port_network_name = port.external_ids.get( ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY) if port_network_name: ls.append(utils.ovn_name(port_network_name)) return ls def _find_lr_of_ls(self, ovn_ls, subnet_gateway_ip=None): lsp_router_port = None for port in ovn_ls.ports or []: if (port.type == 'router' and port.external_ids.get( ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) == n_const.DEVICE_OWNER_ROUTER_INTF): if subnet_gateway_ip: for port_cidr in port.external_ids[ ovn_const.OVN_PORT_CIDR_EXT_ID_KEY].split(): port_ip = netaddr.IPNetwork(port_cidr).ip if netaddr.IPAddress(subnet_gateway_ip) == port_ip: break else: continue lsp_router_port = port break else: return lrp_name = lsp_router_port.options.get('router-port') if not lrp_name: return lrs = self.ovn_nbdb_api.get_lrs().execute(check_error=True) for lr in lrs: for lrp in lr.ports: if lrp.name == lrp_name: return lr # Handles networks with only gateway port in the router if (utils.ovn_lrouter_port_name( lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)) == lrp_name): return lr def _get_listener_key(self, listener_id, is_enabled=True): listener_key = ovn_const.LB_EXT_IDS_LISTENER_PREFIX + 
str(listener_id) if not is_enabled: listener_key += ':' + ovn_const.DISABLED_RESOURCE_SUFFIX return listener_key def _get_pool_key(self, pool_id, is_enabled=True): pool_key = ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id) if not is_enabled: pool_key += ':' + ovn_const.DISABLED_RESOURCE_SUFFIX return pool_key def _extract_member_info(self, member): mem_info = [] if member: for mem in member.split(','): mem_split = mem.split('_') mem_id = mem_split[1] mem_ip_port = mem_split[2] mem_ip, mem_port = mem_ip_port.rsplit(':', 1) mem_subnet = mem_split[3] mem_info.append((mem_ip, mem_port, mem_subnet, mem_id)) return mem_info def _get_member_info(self, member): member_info = '' if isinstance(member, dict): subnet_id = member.get(constants.SUBNET_ID, '') member_info = ( f'{ovn_const.LB_EXT_IDS_MEMBER_PREFIX}{member[constants.ID]}_' f'{member[constants.ADDRESS]}:' f'{member[constants.PROTOCOL_PORT]}_{subnet_id}') elif isinstance(member, o_datamodels.Member): subnet_id = member.subnet_id or '' member_info = ( f'{ovn_const.LB_EXT_IDS_MEMBER_PREFIX}{member.member_id}_' f'{member.address}:{member.protocol_port}_{subnet_id}') return member_info def _make_listener_key_value(self, listener_port, pool_id): return str(listener_port) + ':' + pool_id def _extract_listener_key_value(self, listener_value): v = listener_value.split(':') if len(v) == 2: return (v[0], v[1]) else: return (None, None) def _is_listener_disabled(self, listener_key): v = listener_key.split(':') if len(v) == 2 and v[1] == ovn_const.DISABLED_RESOURCE_SUFFIX: return True return False def _get_pool_listeners(self, ovn_lb, pool_key): pool_listeners = [] for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k: continue vip_port, p_key = self._extract_listener_key_value(v) if pool_key == p_key: pool_listeners.append( k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):]) return pool_listeners def _get_pool_listener_port(self, ovn_lb, pool_key): for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k: continue vip_port, p_key = self._extract_listener_key_value(v) if pool_key == p_key: return vip_port return None def _is_member_offline(self, ovn_lb, member_id): return constants.OFFLINE == self._find_member_status(ovn_lb, member_id) def _frame_vip_ips(self, ovn_lb, lb_external_ids): vip_ips = {} # If load balancer is disabled, return if lb_external_ids.get('enabled') == 'False': return vip_ips lb_vips = [] if ovn_const.LB_EXT_IDS_VIP_KEY in lb_external_ids: lb_vips.append(lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)) if ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY in lb_external_ids: lb_vips.extend(lb_external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY).split(',')) vip_fip = lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) additional_vip_fips = lb_external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY) for k, v in lb_external_ids.items(): if (ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k or self._is_listener_disabled(k)): continue vip_port, pool_id = self._extract_listener_key_value(v) if not vip_port or not pool_id: continue if pool_id not in lb_external_ids or not lb_external_ids[pool_id]: continue ips_v4 = [] ips_v6 = [] for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info( lb_external_ids[pool_id]): if not self._is_member_offline(ovn_lb, mb_id): if netaddr.IPNetwork( mb_ip).version == n_const.IP_VERSION_6: ips_v6.append(f'[{mb_ip}]:{mb_port}') else: ips_v4.append(f'{mb_ip}:{mb_port}') for lb_vip in lb_vips: if ips_v4 and netaddr.IPNetwork( lb_vip).version == 
n_const.IP_VERSION_4: vip_ips[lb_vip + ':' + vip_port] = ','.join(ips_v4) if ips_v6 and netaddr.IPNetwork( lb_vip).version == n_const.IP_VERSION_6: lb_vip = f'[{lb_vip}]' vip_ips[lb_vip + ':' + vip_port] = ','.join(ips_v6) if ips_v4 and vip_fip: if netaddr.IPNetwork(vip_fip).version == n_const.IP_VERSION_4: vip_ips[vip_fip + ':' + vip_port] = ','.join(ips_v4) if ips_v4 and additional_vip_fips: for addi_vip_fip in additional_vip_fips.split(','): if netaddr.IPNetwork( addi_vip_fip).version == n_const.IP_VERSION_4: vip_ips[addi_vip_fip + ':' + vip_port] = ','.join( ips_v4) return vip_ips def _refresh_lb_vips(self, ovn_lb, lb_external_ids, is_sync=False): vip_ips = self._frame_vip_ips(ovn_lb, lb_external_ids) if is_sync and ovn_lb.vips == vip_ips: return [] return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid, 'vips'), self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('vips', vip_ips))] def _is_listener_in_lb(self, lb): for key in list(lb.external_ids): if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX): return True return False def _are_selection_fields_supported(self): return self.ovn_nbdb_api.is_col_present( 'Load_Balancer', 'selection_fields') @staticmethod def _get_selection_keys(lb_algorithm): # pylint: disable=multiple-statements return ovn_const.LB_SELECTION_FIELDS_MAP[lb_algorithm] def check_lb_protocol(self, lb_id, listener_protocol): ovn_lb = self._find_ovn_lbs(lb_id, protocol=listener_protocol) if not ovn_lb: return False elif not self._is_listener_in_lb(ovn_lb): return True else: return str(listener_protocol).lower() in ovn_lb.protocol def _get_port_from_info(self, neutron_client, port_id, network_id, address, subnet_required=True): port = None subnet = None if port_id: port = neutron_client.get_port(port_id) for ip in port.fixed_ips: if ip.get('ip_address') == address: if subnet_required: subnet = neutron_client.get_subnet(ip.get('subnet_id')) break elif network_id and address: ports = self._neutron_list_ports(neutron_client, network_id=network_id) for p in ports: for ip in p.fixed_ips: if ip.get('ip_address') == address: port = p if subnet_required: subnet = neutron_client.get_subnet( ip.get('subnet_id')) break return port, subnet def lb_sync(self, loadbalancer, ovn_lb): """Sync LoadBalancer object with an OVN LoadBalancer The method performs the following steps: 1. Retrieves the port and subnet of the VIP 2. Builds `external_ids` based on the information from the LoadBalancer 3. Compares the constructed `external_ids` with the OVN LoadBalancer's `external_ids`. 4. If there are differences, updates the OVN LoadBalancer's `external_ids`. 5. Builds `selection_fields` based on the information from the LoadBalancer. 6. Compares the constructed `selection_fields` with the OVN LoadBalancer's `selection_fields`. 7. If there are differences, updates the OVN LoadBalancer's `selection_fields`. 8. Updates the `ls_lb` references in the OVN LoadBalancer. 9. Updates the `lr_lb` references in the OVN LoadBalancer. 
:param loadbalancer: The source LoadBalancer object from Octavia DB :param ovn_lb: The OVN LoadBalancer object that needs to be sync """ commands = [] port = None subnet = None neutron_client = self._get_neutron_client() if not neutron_client: return port, subnet = self._get_vip_port_and_subnet_from_lb( neutron_client, loadbalancer.get(constants.VIP_PORT_ID, None), loadbalancer.get(constants.VIP_NETWORK_ID, None), loadbalancer.get(constants.VIP_ADDRESS, None)) if not port or not subnet: return external_ids = self._build_external_ids(loadbalancer, port) self._sync_external_ids(ovn_lb, external_ids, commands) selection_fields = self._build_selection_fields(loadbalancer) self._sync_selection_fields(ovn_lb, selection_fields, commands) try: self._execute_commands(commands) except Exception as e: LOG.exception("Failed to execute commands for load balancer " f"sync: {e}") return # If protocol set make sure its lowercase protocol = ovn_lb.protocol[0].lower() if ovn_lb.protocol else None try: ovn_lb = self._find_ovn_lbs_with_retry( loadbalancer[constants.ID], protocol=protocol) ovn_lb = ovn_lb if protocol else ovn_lb[0] self._sync_lb_associations(neutron_client, ovn_lb, port, subnet, loadbalancer) except idlutils.RowNotFound: LOG.exception(f"OVN LoadBalancer {loadbalancer[constants.ID]} not " "found on OVN NB DB.") except Exception as e: LOG.exception("Failed syncing lb associations on LS and LR for " f"load balancer sync: {e}") def lb_create(self, loadbalancer, protocol=None): port = None subnet = None additional_ports = [] try: neutron_client = clients.get_neutron_client() port, subnet = self._get_port_from_info( neutron_client, loadbalancer.get(constants.VIP_PORT_ID, None), loadbalancer.get(constants.VIP_NETWORK_ID, None), loadbalancer.get(constants.VIP_ADDRESS, None)) if loadbalancer.get(constants.ADDITIONAL_VIPS): for additional_vip_port in loadbalancer.get( constants.ADDITIONAL_VIPS): ad_port, ad_subnet = self._get_port_from_info( neutron_client, additional_vip_port.get('port_id', None), additional_vip_port.get(constants.NETWORK_ID, None), additional_vip_port.get('ip_address', None), False) additional_ports.append(ad_port) except Exception: LOG.error('Cannot get info from neutron') LOG.exception(ovn_const.EXCEPTION_MSG, "creation of loadbalancer") # Any Exception set the status to ERROR if port: try: self.delete_port(port.id) LOG.warning("Deleting the VIP port %s since LB went into " "ERROR state", str(port.id)) except Exception: LOG.exception("Error deleting the VIP port %s upon " "loadbalancer %s creation failure", str(port.id), str(loadbalancer[constants.ID])) for addi_port in additional_ports: try: self.delete_port(addi_port.id) LOG.warning("Deleting the additional VIP port %s " "since LB went into ERROR state", str(addi_port.id)) except Exception: LOG.exception("Error deleting the additional VIP port " "%s upon loadbalancer %s creation " "failure", str(addi_port.id), str(loadbalancer[constants.ID])) status = { constants.LOADBALANCERS: [ {constants.ID: loadbalancer[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR}]} return status # If protocol set make sure its lowercase protocol = protocol.lower() if protocol else [] # In case port is not found for the vip_address we will see an # exception when port['id'] is accessed. 
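# Illustrative external_ids bootstrap built just below (hypothetical
# values): {ovn_const.LB_EXT_IDS_VIP_KEY: '192.0.2.10',
# ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: '<vip-port-uuid>',
# 'enabled': 'True'} -- optional FIP/additional-VIP/lr_ref keys are
# appended afterwards when present.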
external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer[constants.VIP_ADDRESS], ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: loadbalancer.get(constants.VIP_PORT_ID) or port.id, 'enabled': str(loadbalancer[constants.ADMIN_STATE_UP])} # In case additional_vips was passed if loadbalancer.get(constants.ADDITIONAL_VIPS): addi_vip = [x['ip_address'] for x in loadbalancer.get(constants.ADDITIONAL_VIPS)] addi_vip_port_id = [x['port_id'] for x in loadbalancer.get( constants.ADDITIONAL_VIPS)] addi_vip = ','.join(addi_vip) addi_vip_port_id = ','.join(addi_vip_port_id) external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = addi_vip external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY] = \ addi_vip_port_id # In case vip_fip was passed - use it. vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) if vip_fip: external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip # In case additional_vip_fip was passed - use it. additional_vip_fip = loadbalancer.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY) if additional_vip_fip: external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = \ additional_vip_fip # In case of lr_ref passed - use it. lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) if lr_ref: external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref # In case we have LB algoritm set lb_algorithm = loadbalancer.get(constants.LB_ALGORITHM) kwargs = { 'name': loadbalancer[constants.ID], 'protocol': protocol, 'external_ids': external_ids} if self._are_selection_fields_supported(): kwargs['selection_fields'] = self._get_selection_keys(lb_algorithm) try: self.ovn_nbdb_api.db_create( 'Load_Balancer', **kwargs).execute(check_error=True) ovn_lb = self._find_ovn_lbs( loadbalancer[constants.ID], protocol=protocol) ovn_lb = ovn_lb if protocol else ovn_lb[0] # NOTE(ltomasbo): If the VIP is on a provider network, it does # not need to be associated to its LS network = neutron_client.get_network(port.network_id) if not network.provider_physical_network: # NOTE(froyo): This is the association of the lb to the VIP ls # so this is executed right away. For the additional vip ports # this step is not required since all subnets must belong to # the same subnet, so just for the VIP LB port is enough. self._update_lb_to_ls_association( ovn_lb, network_id=port.network_id, associate=True, update_ls_ref=True, additional_vips=True) ls_name = utils.ovn_name(port.network_id) ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) ovn_lr = self._find_lr_of_ls(ovn_ls, subnet.gateway_ip) if ovn_lr: try: # NOTE(froyo): This is the association of the lb to the # router associated to VIP ls and all ls connected to that # router we try atomically, if it fails we will go step by # step, discarding the associations from lb to a # non-existent ls, but we will demand the association of # lb to lr self._update_lb_to_lr_association(ovn_lb, ovn_lr) except idlutils.RowNotFound: LOG.warning("The association of loadbalancer %s to the " "logical router %s failed, trying step by " "step", ovn_lb.uuid, ovn_lr.uuid) self._update_lb_to_lr_association_by_step(ovn_lb, ovn_lr) # NOTE(mjozefcz): In case of LS references where passed - # apply LS to the new LB. That could happend in case we # need another loadbalancer for other L4 protocol. ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) if ls_refs: try: ls_refs = jsonutils.loads(ls_refs) except ValueError: ls_refs = {} for ls in ls_refs: # Skip previously added LS because we don't want # to duplicate. 
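# Illustrative only: the external_ids assembled in lb_create() above are keyed
# by the ovn_const.LB_EXT_IDS_* constants; the literal key names and values
# below are assumptions made for this example, not taken from a real
# deployment.
example_lb_external_ids = {
    'neutron:vip': '10.0.0.10',            # ovn_const.LB_EXT_IDS_VIP_KEY
    'neutron:vip_port_id': '<port-uuid>',  # ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY
    'enabled': 'True',
    # Optional, only when passed in: additional VIPs/FIPs and the lr_ref
    # (ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY, LB_EXT_IDS_VIP_FIP_KEY,
    # LB_EXT_IDS_LR_REF_KEY).
}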
if ls == ovn_ls.name: continue self._update_lb_to_ls_association( ovn_lb, network_id=utils.ovn_uuid(ls), associate=True, update_ls_ref=True) operating_status = constants.ONLINE # The issue is that since OVN doesnt support any HMs, # we ideally should never put the status as 'ONLINE' if not loadbalancer.get(constants.ADMIN_STATE_UP, True): operating_status = constants.OFFLINE status = { constants.LOADBALANCERS: [ {constants.ID: loadbalancer[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status}]} # If the connection with the OVN NB db server is broken, then # ovsdbapp will throw either TimeOutException or RunTimeError. # May be we can catch these specific exceptions. # It is important to report the status to octavia. We can report # immediately or reschedule the lb_create request later. # For now lets report immediately. except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of loadbalancer") # Any Exception set the status to ERROR if port: try: self.delete_port(port.id) LOG.warning("Deleting the VIP port %s since LB went into " "ERROR state", str(port.id)) except Exception: LOG.exception("Error deleting the VIP port %s upon " "loadbalancer %s creation failure", str(port.id), str(loadbalancer[constants.ID])) for addi_port in additional_ports: try: self.delete_port(addi_port.id) LOG.warning("Deleting the additional VIP port %s " "since LB went into ERROR state", str(addi_port.id)) except Exception: LOG.exception("Error deleting the additional VIP port " "%s upon loadbalancer %s creation " "failure", str(addi_port.id), str(loadbalancer[constants.ID])) status = { constants.LOADBALANCERS: [ {constants.ID: loadbalancer[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR}]} return status def lb_delete(self, loadbalancer): port_id = None lbalancer_status = { constants.ID: loadbalancer[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: constants.OFFLINE} status = { constants.LOADBALANCERS: [lbalancer_status], constants.LISTENERS: [], constants.POOLS: [], constants.MEMBERS: []} ovn_lbs = None try: ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID]) except idlutils.RowNotFound: LOG.warning("Loadbalancer %s not found in OVN Northbound DB. 
" "Setting the Loadbalancer status to DELETED " "in Octavia", str(loadbalancer[constants.ID])) # NOTE(ltomasbo): In case the previous loadbalancer deletion # action failed at VIP deletion step, this ensures the VIP # is not leaked try: # from api to clean also those ports vip_port_id = self._get_vip_port_from_loadbalancer_id( loadbalancer[constants.ID]) if vip_port_id: LOG.warning("Deleting the VIP port %s associated to LB " "missing in OVN DBs", str(vip_port_id)) self.delete_port(vip_port_id) except Exception: LOG.exception("Error deleting the VIP port %s", str(vip_port_id)) lbalancer_status[constants.PROVISIONING_STATUS] = ( constants.ERROR) lbalancer_status[constants.OPERATING_STATUS] = constants.ERROR try: additional_vip_port_ids = \ self._get_additional_vips_from_loadbalancer_id( loadbalancer[constants.ID]) addi_port_id = '' for additional_port in additional_vip_port_ids: addi_port_id = additional_port['port_id'] LOG.warning("Deleting additional VIP port %s " "associated to LB missing in OVN DBs", str(addi_port_id)) self.delete_port(addi_port_id) except Exception: LOG.exception("Error deleting the additional VIP port %s", str(addi_port_id)) lbalancer_status[constants.PROVISIONING_STATUS] = ( constants.ERROR) lbalancer_status[constants.OPERATING_STATUS] = constants.ERROR return status try: port_id = ovn_lbs[0].external_ids[ ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY] additional_vip_port_ids = ovn_lbs[0].external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY, None) for ovn_lb in ovn_lbs: status = self._lb_delete(loadbalancer, ovn_lb, status) # Clear the status dict of any key having [] value # Python 3.6 doesnt allow deleting an element in a # dict while iterating over it. So first get a list of keys. # https://cito.github.io/blog/never-iterate-a-changing-dict/ status = {key: value for key, value in status.items() if value} # Delete VIP port from neutron. self.delete_port(port_id) # Also delete additional_vip ports from neutron. 
if additional_vip_port_ids: for addit_vip_port_id in additional_vip_port_ids.split(','): self.delete_port(addit_vip_port_id) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of loadbalancer") lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR lbalancer_status[constants.OPERATING_STATUS] = constants.ERROR return status def _lb_delete(self, loadbalancer, ovn_lb, status): commands = [] member_subnets = [] clean_up_hm_port_required = False if loadbalancer['cascade']: # Delete all pools for key, value in ovn_lb.external_ids.items(): if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX): pool_id = key.split('_')[1] # Delete all members in the pool if value and len(value.split(',')) > 0: for mem_info in value.split(','): member_subnets.append(mem_info.split('_')[3]) member_id = mem_info.split("_")[1] member_ip = mem_info.split('_')[2].split(":")[0] member_port = mem_info.split('_')[2].split(":")[1] member_subnet = mem_info.split("_")[3] member = { 'id': member_id, 'address': member_ip, 'protocol_port': member_port, 'pool_id': pool_id, 'subnet_id': member_subnet} self.member_delete(member) member_info = { 'id': member_id, 'address': member_ip, 'pool_id': pool_id, 'subnet_id': member_subnet, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} self.handle_member_dvr(member_info) status[constants.MEMBERS].append({ constants.ID: mem_info.split('_')[1], constants.PROVISIONING_STATUS: constants.DELETED}) status[constants.POOLS].append( {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.DELETED}) if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX): status[constants.LISTENERS].append({ constants.ID: key.split('_')[1], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: constants.OFFLINE}) if ovn_lb.health_check: clean_up_hm_port_required = True commands.append( self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid, 'health_check')) ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY, {}) if ls_refs: try: ls_refs = jsonutils.loads(ls_refs) except ValueError: ls_refs = {} for ls_name in ls_refs.keys(): try: ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) commands.append( self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid)) except idlutils.RowNotFound: LOG.warning("LogicalSwitch %s could not be found. Cannot " "delete Load Balancer from it", ls_name) # Delete LB from all Networks the LB is indirectly associated for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'): commands.append( self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid, if_exists=True)) lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY, {}) if lr_ref: try: lr = self.ovn_nbdb_api.lookup('Logical_Router', lr_ref) commands.append(self.ovn_nbdb_api.lr_lb_del( lr.uuid, ovn_lb.uuid)) except idlutils.RowNotFound: pass # Delete LB from all Routers the LB is indirectly associated for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'): commands.append( self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid, if_exists=True)) commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid)) try: self._execute_commands(commands) except idlutils.RowNotFound: # NOTE(froyo): If any of the Ls or Lr had been deleted between # time to list and time to execute txn, we will received a # RowNotFound exception, if this case we will run every command # one by one passing exception in case the command is related to # deletion of Ls or Lr already deleted. 
Any other case will raise # exception and upper function will report the LB in ERROR status for command in commands: try: command.execute(check_error=True) except idlutils.RowNotFound: if isinstance(command, (cmd.LsLbDelCommand)): LOG.warning('delete lb from ls fail because ls ' '%s is not found, keep going on...', getattr(command, 'switch', '')) elif isinstance(command, (cmd.LrLbDelCommand)): LOG.warning('delete lb to lr fail because lr ' '%s is not found, keep going on...', getattr(command, 'router', '')) else: raise # NOTE(froyo): we should remove the hm-port if the LB was using a HM # and no more LBs are using it if clean_up_hm_port_required: for subnet_id in list(set(member_subnets)): self._clean_up_hm_port(subnet_id) return status def lb_update(self, loadbalancer): lb_status = {constants.ID: loadbalancer[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} status = {constants.LOADBALANCERS: [lb_status]} if constants.ADMIN_STATE_UP not in loadbalancer: return status lb_enabled = loadbalancer[constants.ADMIN_STATE_UP] try: ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID]) # It should be unique for all the LBS for all protocols, # so we could just easly loop over all defined for given # Octavia LB. for ovn_lb in ovn_lbs: if str(ovn_lb.external_ids['enabled']) != str(lb_enabled): commands = [] enable_info = {'enabled': str(lb_enabled)} ovn_lb.external_ids['enabled'] = str(lb_enabled) commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', enable_info)) ) commands.extend( self._refresh_lb_vips(ovn_lb, ovn_lb.external_ids)) self._execute_commands(commands) if lb_enabled: operating_status = constants.ONLINE else: operating_status = constants.OFFLINE lb_status[constants.OPERATING_STATUS] = operating_status except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of loadbalancer") lb_status[constants.PROVISIONING_STATUS] = constants.ERROR lb_status[constants.OPERATING_STATUS] = constants.ERROR return status def _get_vip_port_from_loadbalancer_id(self, lb_id): lb = self._octavia_driver_lib.get_loadbalancer(lb_id) lb_vip_port_id = lb.vip_port_id if lb and lb.vip_port_id else None return lb_vip_port_id def _get_additional_vips_from_loadbalancer_id(self, lb_id): lb = self._octavia_driver_lib.get_loadbalancer(lb_id) additional_vips = [] if lb and lb.additional_vips: for vip in lb.additional_vips: additional_vips.append({ 'ip_address': vip['ip_address'], constants.NETWORK_ID: vip[constants.NETWORK_ID], 'port_id': vip['port_id'], constants.SUBNET_ID: vip[constants.SUBNET_ID] }) return additional_vips def listener_create(self, listener): ovn_lb = self._get_or_create_ovn_lb( listener[constants.LOADBALANCER_ID], listener[constants.PROTOCOL], listener[constants.ADMIN_STATE_UP]) external_ids = copy.deepcopy(ovn_lb.external_ids) listener_key = self._get_listener_key( listener[constants.ID], is_enabled=listener[constants.ADMIN_STATE_UP]) if listener.get(constants.DEFAULT_POOL_ID): pool_key = self._get_pool_key(listener[constants.DEFAULT_POOL_ID]) else: pool_key = '' external_ids[listener_key] = self._make_listener_key_value( listener[constants.PROTOCOL_PORT], pool_key) listener_info = {listener_key: external_ids[listener_key]} try: commands = [] commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', listener_info))) if not self._is_listener_in_lb(ovn_lb): commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('protocol', str(listener[constants.PROTOCOL]).lower()))) 
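# Minimal sketch of the enable/disable toggle performed by lb_update() above,
# assuming an `nb_api` handle with the same db_set() call style used in this
# module; the vips are refreshed afterwards so a disabled LB stops exposing
# its mappings. The helper name is hypothetical.
def _toggle_lb_enabled_sketch(nb_api, ovn_lb, lb_enabled, commands):
    if str(ovn_lb.external_ids.get('enabled')) != str(lb_enabled):
        commands.append(
            nb_api.db_set('Load_Balancer', ovn_lb.uuid,
                          ('external_ids', {'enabled': str(lb_enabled)})))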
commands.extend(self._refresh_lb_vips(ovn_lb, external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of listener") status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status operating_status = constants.ONLINE if not listener.get(constants.ADMIN_STATE_UP, True): operating_status = constants.OFFLINE if pool_key: for lb_hc in ovn_lb.health_check: if pool_key[len(ovn_const.LB_EXT_IDS_POOL_PREFIX):] == ( lb_hc.external_ids.get( ovn_const.LB_EXT_IDS_HM_POOL_KEY)): if not self._update_lbhc_vip_port( lb_hc, listener[constants.PROTOCOL_PORT]): operating_status = constants.ERROR status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def listener_sync(self, listener, ovn_lb): """Sync Listener object with an OVN LoadBalancer The method performs the following steps: 1. Update listener key on OVN Loadbalancer external_ids if needed 2. Update OVN LoadBalancer protocol from Listener info if needed 3. Refresh OVN LoadBalancer vips :param listener: The source listener object from Octavia DB :param ovn_lb: The OVN LoadBalancer object that needs to be sync """ commands = [] external_ids = copy.deepcopy(ovn_lb.external_ids) listener_key, listener_info = self._build_listener_info( listener, external_ids) self._update_listener_key_if_needed( listener_key, listener_info, ovn_lb, commands) self._update_protocol_if_needed(listener, ovn_lb, commands) try: commands.extend(self._refresh_lb_vips( ovn_lb, external_ids, is_sync=True)) except Exception as e: LOG.exception(f"Failed to refresh LB VIPs: {e}") return try: self._execute_commands(commands) except Exception as e: LOG.exception(f"Failed to execute commands for listener sync: {e}") return def listener_delete(self, listener): status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: constants.OFFLINE}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} try: ovn_lb = self._find_ovn_lbs( listener[constants.LOADBALANCER_ID], protocol=listener[constants.PROTOCOL]) except idlutils.RowNotFound: # Listener already deleted. return status external_ids = copy.deepcopy(ovn_lb.external_ids) listener_key = self._get_listener_key(listener[constants.ID]) if listener_key in external_ids: try: commands = [] commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (listener_key))) # Drop current listener from LB. del external_ids[listener_key] # Set LB protocol to undefined only if there are no more # listeners and pools defined in the LB. cmds, lb_to_delete = self._clean_lb_if_empty( ovn_lb, listener[constants.LOADBALANCER_ID], external_ids) commands.extend(cmds) # Do not refresh vips if OVN LB for given protocol # has pending delete operation. 
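# Illustrative only: each listener is stored in external_ids as
# '<listener-prefix><listener-uuid>' -> '<protocol_port>:<pool_key>' (see the
# _make_listener_key_value() usage above and the value.split(':')[0] handling
# in pool_delete() further below). The key prefix comes from
# ovn_const.LB_EXT_IDS_LISTENER_PREFIX; the literal below is an assumption.
example_listener_entry = {
    'listener_<listener-uuid>': '80:pool_<pool-uuid>',
}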
if not lb_to_delete: commands.extend( self._refresh_lb_vips(ovn_lb, external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of listener") status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def listener_update(self, listener): # NOTE(mjozefcz): Based on # https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=update-a-listener-detail # there is no possibility to update listener protocol or port. listener_status = {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} lbalancer_status = { constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE} pool_status = [] status = { constants.LISTENERS: [listener_status], constants.LOADBALANCERS: [lbalancer_status], constants.POOLS: pool_status} try: ovn_lb = self._find_ovn_lbs( listener[constants.LOADBALANCER_ID], protocol=listener[constants.PROTOCOL]) except idlutils.RowNotFound: LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") # LB row not found during update of a listener. That is a problem. listener_status[constants.PROVISIONING_STATUS] = constants.ERROR lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR return status l_key_when_enabled = self._get_listener_key(listener[constants.ID]) l_key_when_disabled = self._get_listener_key( listener[constants.ID], is_enabled=False) external_ids = copy.deepcopy(ovn_lb.external_ids) if constants.ADMIN_STATE_UP not in listener and ( constants.DEFAULT_POOL_ID not in listener): return status l_key_to_add = {} if l_key_when_enabled in external_ids: present_l_key = l_key_when_enabled elif l_key_when_disabled in external_ids: present_l_key = l_key_when_disabled else: # Something is terribly wrong. This cannot happen. 
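# Standalone sketch of the key swap done below when admin_state_up changes:
# the listener's value moves between its "enabled" and "disabled" keys (the
# disabled variant comes from _get_listener_key(..., is_enabled=False)). The
# helper name is hypothetical.
def _swap_listener_key_sketch(external_ids, present_key, new_key):
    if present_key != new_key:
        external_ids[new_key] = external_ids.pop(present_key)
    return external_ids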
return status try: commands = [] new_l_key = None l_key_to_remove = None if constants.ADMIN_STATE_UP in listener: if listener[constants.ADMIN_STATE_UP]: # We need to enable the listener new_l_key = l_key_when_enabled listener_status[constants.OPERATING_STATUS] = ( constants.ONLINE) else: # We need to disable the listener new_l_key = l_key_when_disabled listener_status[constants.OPERATING_STATUS] = ( constants.OFFLINE) if present_l_key != new_l_key: external_ids[new_l_key] = external_ids[present_l_key] l_key_to_add[new_l_key] = external_ids[present_l_key] del external_ids[present_l_key] l_key_to_remove = present_l_key if l_key_to_remove: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (l_key_to_remove))) else: new_l_key = present_l_key if constants.DEFAULT_POOL_ID in listener: pool_key = self._get_pool_key( listener[constants.DEFAULT_POOL_ID]) l_key_value = self._make_listener_key_value( listener[constants.PROTOCOL_PORT], pool_key) l_key_to_add[new_l_key] = l_key_value external_ids[new_l_key] = l_key_value pool_status.append( {constants.ID: listener[constants.DEFAULT_POOL_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}) if l_key_to_add: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', l_key_to_add))) commands.extend( self._refresh_lb_vips(ovn_lb, external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") status = { constants.LISTENERS: [ {constants.ID: listener[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: listener[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def pool_create(self, pool): ovn_lb = self._get_or_create_ovn_lb( pool[constants.LOADBALANCER_ID], pool[constants.PROTOCOL], pool[constants.ADMIN_STATE_UP], lb_algorithm=pool[constants.LB_ALGORITHM]) external_ids = copy.deepcopy(ovn_lb.external_ids) pool_key = self._get_pool_key( pool[constants.ID], is_enabled=pool[constants.ADMIN_STATE_UP]) external_ids[pool_key] = '' if pool[constants.LISTENER_ID]: listener_key = self._get_listener_key(pool[constants.LISTENER_ID]) # NOTE(froyo): checking is not already when ovn-db-sync-tool is # triggered, because listener_create could be added already if # pool is considered as default one if listener_key in ovn_lb.external_ids and \ str(pool_key) not in external_ids[listener_key]: external_ids[listener_key] = str( external_ids[listener_key]) + str(pool_key) persistence_timeout = None if pool.get(constants.SESSION_PERSISTENCE): persistence_timeout = pool[constants.SESSION_PERSISTENCE].get( constants.PERSISTENCE_TIMEOUT, '360') try: commands = [] commands.append(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', external_ids))) if persistence_timeout: options = copy.deepcopy(ovn_lb.options) options[ovn_const.AFFINITY_TIMEOUT] = str(persistence_timeout) commands.append(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('options', options))) self._execute_commands(commands) # Pool status will be set to Online after a member is added to it # or when it is created with listener. 
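# Illustrative only: the session persistence handling above copies Octavia's
# PERSISTENCE_TIMEOUT (defaulting to '360') into the OVN Load_Balancer
# options column under ovn_const.AFFINITY_TIMEOUT; the literal option name
# below is an assumption used only for this sketch.
def _affinity_options_sketch(current_options, persistence_timeout='360'):
    options = dict(current_options)
    options['affinity_timeout'] = str(persistence_timeout)
    return options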
operating_status = constants.OFFLINE if pool[constants.LISTENER_ID]: operating_status = constants.ONLINE status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} if pool[constants.LISTENER_ID]: listener_status = [ {constants.ID: pool[constants.LISTENER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}] status[constants.LISTENERS] = listener_status except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of pool") status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} if pool[constants.LISTENER_ID]: listener_status = [ {constants.ID: pool[constants.LISTENER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}] status[constants.LISTENERS] = listener_status return status def pool_sync(self, pool, ovn_lb): """Sync Pool object with an OVN LoadBalancer The method performs the following steps: 1. Update pool key on OVN Loadbalancer external_ids if needed 2. Update OVN LoadBalancer options from Pool info :param pool: The source pool object from Octavia DB :param ovn_lb: The OVN LoadBalancer object that needs to be sync """ external_ids = self._prepare_external_ids(pool, ovn_lb) persistence_timeout = self._extract_persistence_timeout(pool) try: commands = [] self._add_external_ids_command(commands, ovn_lb, external_ids) if persistence_timeout: self._add_persistence_timeout_command(commands, ovn_lb, persistence_timeout) self._execute_commands(commands) except Exception as e: LOG.exception(f"Failed to execute commands for pool sync: {e}") def pool_delete(self, pool): status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} try: ovn_lb = self._find_ovn_lbs( pool[constants.LOADBALANCER_ID], pool[constants.PROTOCOL]) except idlutils.RowNotFound: # LB row not found that means pool is deleted. 
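# Illustrative only: each pool entry in external_ids holds its members as a
# comma-separated string, and the '_'-separated fields are parsed elsewhere
# in this module as (prefix, member_id, ip:port, subnet_id). The key prefix
# comes from ovn_const.LB_EXT_IDS_POOL_PREFIX; the literals are made up.
example_pool_entry = {
    'pool_<pool-uuid>':
        'member_<member-uuid>_192.168.0.5:8080_<subnet-uuid>',
}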
return status pool_key = self._get_pool_key(pool[constants.ID]) commands = [] external_ids = copy.deepcopy(ovn_lb.external_ids) pool_listeners = [] try: pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) if pool_key in ovn_lb.external_ids: commands.append( self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, 'external_ids', (pool_key))) del external_ids[pool_key] commands.extend( self._refresh_lb_vips(ovn_lb, external_ids)) # Remove Pool from Listener if it is associated for key, value in ovn_lb.external_ids.items(): if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and pool_key in value): external_ids[key] = value.split(':')[0] + ':' commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', external_ids))) pool_key_when_disabled = self._get_pool_key(pool[constants.ID], is_enabled=False) if pool_key_when_disabled in ovn_lb.external_ids: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (pool_key_when_disabled))) if ovn_const.AFFINITY_TIMEOUT in ovn_lb.options: commands.append( self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, 'options', (ovn_const.AFFINITY_TIMEOUT))) commands.extend( self._clean_lb_if_empty( ovn_lb, pool[constants.LOADBALANCER_ID], external_ids)[0]) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of pool") status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status return status def pool_update(self, pool): pool_status = {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE} lbalancer_status = {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE} status = { constants.POOLS: [pool_status], constants.LOADBALANCERS: [lbalancer_status]} if (constants.ADMIN_STATE_UP not in pool and constants.SESSION_PERSISTENCE not in pool): return status try: ovn_lb = self._find_ovn_lbs( pool[constants.LOADBALANCER_ID], protocol=pool[constants.PROTOCOL]) except idlutils.RowNotFound: LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool") # LB row not found during update of a listener. That is a problem. 
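# Worked example of the listener detach performed by pool_delete() above: the
# pool reference is stripped from the listener value while the protocol port
# is kept.
_listener_value = '80:pool_<pool-uuid>'
_detached_value = _listener_value.split(':')[0] + ':'
# _detached_value == '80:'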
pool_status[constants.PROVISIONING_STATUS] = constants.ERROR lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR return status pool_key = self._get_pool_key(pool[constants.ID]) p_key_when_disabled = self._get_pool_key(pool[constants.ID], is_enabled=False) external_ids = copy.deepcopy(ovn_lb.external_ids) p_key_to_remove = None p_key_to_add = {} pool_listeners = [] commands = [] try: pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) admin_state_up = pool.get(constants.ADMIN_STATE_UP) if admin_state_up is not None: if admin_state_up: if p_key_when_disabled in external_ids: p_key_to_add[pool_key] = external_ids[ p_key_when_disabled] external_ids[pool_key] = external_ids[ p_key_when_disabled] del external_ids[p_key_when_disabled] p_key_to_remove = p_key_when_disabled else: if pool_key in external_ids: p_key_to_add[p_key_when_disabled] = external_ids[ pool_key] external_ids[p_key_when_disabled] = external_ids[ pool_key] del external_ids[pool_key] p_key_to_remove = pool_key if p_key_to_remove: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (p_key_to_remove))) commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', p_key_to_add))) commands.extend( self._refresh_lb_vips(ovn_lb, external_ids)) if pool.get(constants.SESSION_PERSISTENCE): new_timeout = pool[constants.SESSION_PERSISTENCE].get( constants.PERSISTENCE_TIMEOUT, '360') options = copy.deepcopy(ovn_lb.options) options[ovn_const.AFFINITY_TIMEOUT] = str(new_timeout) commands.append(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('options', options))) self._execute_commands(commands) if pool[constants.ADMIN_STATE_UP]: operating_status = constants.ONLINE else: operating_status = constants.OFFLINE pool_status[constants.OPERATING_STATUS] = operating_status except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool") status = { constants.POOLS: [ {constants.ID: pool[constants.ID], constants.PROVISIONING_STATUS: constants.ERROR}], constants.LOADBALANCERS: [ {constants.ID: pool[constants.LOADBALANCER_ID], constants.PROVISIONING_STATUS: constants.ACTIVE}]} listener_status = [] for listener in pool_listeners: listener_status.append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) status[constants.LISTENERS] = listener_status return status def _find_member_status(self, ovn_lb, member_id): # NOTE (froyo): Search on lb.external_ids under tag # neutron:member_status, if member not found we will return # NO_MONITOR try: existing_members = ovn_lb.external_ids.get( ovn_const.OVN_MEMBER_STATUS_KEY) existing_members = jsonutils.loads(existing_members) return existing_members[member_id] except TypeError: LOG.debug("no member status on external_ids: %s", str(existing_members)) except KeyError: LOG.debug("Error member_id %s not found on member_status", str(member_id)) return constants.NO_MONITOR def _update_member_statuses(self, ovn_lb, pool_id, provisioning_status, operating_status): member_statuses = [] existing_members = ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id)) if len(existing_members) > 0: for mem_info in existing_members.split(','): member_statuses.append({ constants.ID: mem_info.split('_')[1], constants.PROVISIONING_STATUS: provisioning_status, constants.OPERATING_STATUS: operating_status}) self._update_external_ids_member_status( ovn_lb, mem_info.split('_')[1], operating_status) return member_statuses def _update_external_ids_member_status(self, ovn_lb, member, status=None, 
delete=False): existing_members = ovn_lb.external_ids.get( ovn_const.OVN_MEMBER_STATUS_KEY) try: existing_members = jsonutils.loads(existing_members) except TypeError: LOG.debug("no member status on external_ids: %s", str(existing_members)) existing_members = {} if delete: if member in existing_members: del existing_members[member] else: existing_members[member] = status try: if existing_members: member_status = { ovn_const.OVN_MEMBER_STATUS_KEY: jsonutils.dumps(existing_members)} self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', member_status)).execute() else: self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (ovn_const.OVN_MEMBER_STATUS_KEY)).execute() except Exception: LOG.exception("Error storing member status on external_ids member:" " %s delete: %s status: %s", str(member), str(delete), str(status)) def _get_members_in_ovn_lb(self, ovn_lb, pool_key): existing_members = ovn_lb.external_ids.get(pool_key, None) if existing_members: existing_members = existing_members.split(",") return [ self._extract_member_info( member)[0] for member in existing_members ] else: return [] def member_sync(self, member, ovn_lb, pool_key): """Sync Member object with an OVN LoadBalancer The method performs the following steps: 1. Update pool key with member info on OVN Loadbalancer external_ids if needed 2. Update OVN LoadBalancer vips 3. Update references on LS or LR from the member if needed 4. Update OVN Loadbalancer member_status info on external_ids :param member: The source member object from Octavia DB :param ovn_lb: The OVN LoadBalancer object that needs to be sync :param pool_key: The pool_key where member is associated """ external_ids = copy.deepcopy(ovn_lb.external_ids) pool_data = self._update_pool_data(member, pool_key, external_ids) commands = [] if pool_data: self._add_pool_data_command(commands, ovn_lb, pool_data) external_ids[pool_key] = pool_data[pool_key] try: if member.get(constants.ADMIN_STATE_UP, False): commands.extend(self._refresh_lb_vips( ovn_lb, external_ids, is_sync=True)) except Exception as e: LOG.exception(f"Failed to refresh LB VIPs: {e}") return try: self._execute_commands(commands) except Exception as e: LOG.exception(f"Failed to execute commands for listener sync: {e}") return self._update_lb_to_ls_association( ovn_lb, subnet_id=member[constants.SUBNET_ID], associate=True, update_ls_ref=True, is_sync=True) # Make sure that all logical switches related to logical router # are associated with the load balancer. This is needed to handle # potential race that happens when lrp and lb are created at the # same time. 
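# Illustrative only: the member status bookkeeping above keeps a plain JSON
# object under ovn_const.OVN_MEMBER_STATUS_KEY in external_ids, which
# _find_member_status() loads back with jsonutils.loads(). The member ids and
# statuses below are made-up examples.
from oslo_serialization import jsonutils as _jsonutils_example

example_member_status_value = _jsonutils_example.dumps(
    {'<member-uuid>': 'ONLINE', '<other-member-uuid>': 'NO_MONITOR'})
# e.g. '{"<member-uuid>": "ONLINE", "<other-member-uuid>": "NO_MONITOR"}'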
ovn_lr = self._get_related_lr(member) if ovn_lr: self._sync_lb_to_lr_association(ovn_lb, ovn_lr) # TODO(froyo): Check if originally status in Octavia is ERROR if # we receive that info from the object self._update_external_ids_member_status( ovn_lb, member[constants.ID], constants.NO_MONITOR) def _add_member(self, member, ovn_lb, pool_key): external_ids = copy.deepcopy(ovn_lb.external_ids) existing_members = external_ids[pool_key] if existing_members: existing_members = existing_members.split(",") member_info = self._get_member_info(member) if member_info in existing_members: # Member already present return None if existing_members: existing_members.append(member_info) pool_data = {pool_key: ",".join(existing_members)} else: pool_data = {pool_key: member_info} commands = [] commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', pool_data))) external_ids[pool_key] = pool_data[pool_key] # NOTE(froyo): Add the member to the vips if it is enabled if member.get(constants.ADMIN_STATE_UP, False): commands.extend(self._refresh_lb_vips(ovn_lb, external_ids)) # Note (froyo): commands are now splitted to separate atomic process, # leaving outside the not mandatory ones to allow add_member # finish correctly self._execute_commands(commands) subnet_id = member[constants.SUBNET_ID] self._update_lb_to_ls_association( ovn_lb, subnet_id=subnet_id, associate=True, update_ls_ref=True) # Make sure that all logical switches related to logical router # are associated with the load balancer. This is needed to handle # potential race that happens when lrp and lb are created at the # same time. neutron_client = clients.get_neutron_client() ovn_lr = None try: subnet = neutron_client.get_subnet(subnet_id) ls_name = utils.ovn_name(subnet.network_id) ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( check_error=True) ovn_lr = self._find_lr_of_ls( ovn_ls, subnet.gateway_ip) except openstack.exceptions.ResourceNotFound: pass except idlutils.RowNotFound: pass if ovn_lr: try: self._update_lb_to_lr_association(ovn_lb, ovn_lr) except idlutils.RowNotFound: LOG.warning("The association of loadbalancer %s to the " "logical router %s failed, trying step by step", ovn_lb.uuid, ovn_lr.uuid) self._update_lb_to_lr_association_by_step(ovn_lb, ovn_lr) return member_info def member_create(self, member): new_member = None try: pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( member[constants.POOL_ID]) new_member = self._add_member(member, ovn_lb, pool_key) operating_status = constants.NO_MONITOR except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "creation of member") operating_status = constants.ERROR if not member[constants.ADMIN_STATE_UP]: operating_status = constants.OFFLINE elif (new_member and operating_status == constants.NO_MONITOR and ovn_lb.health_check): operating_status = constants.ONLINE mb_ip, mb_port, mb_subnet, mb_id = self._extract_member_info( new_member)[0] mb_status = self._update_hm_member(ovn_lb, pool_key, mb_ip) operating_status = ( constants.ERROR if mb_status != constants.ONLINE else mb_status ) self._update_external_ids_member_status( ovn_lb, member[constants.ID], operating_status) status = self._get_current_operating_statuses(ovn_lb) return status def _remove_member(self, member, ovn_lb, pool_key): external_ids = copy.deepcopy(ovn_lb.external_ids) existing_members = external_ids[pool_key].split(",") member_info = self._get_member_info(member) if member_info in existing_members: if ovn_lb.health_check: self._update_hm_member(ovn_lb, pool_key, 
member.get(constants.ADDRESS), delete=True) commands = [] existing_members.remove(member_info) if not existing_members: pool_status = constants.OFFLINE else: pool_status = constants.ONLINE pool_data = {pool_key: ",".join(existing_members)} commands.append( self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, ('external_ids', pool_data))) external_ids[pool_key] = ",".join(existing_members) commands.extend( self._refresh_lb_vips(ovn_lb, external_ids)) self._execute_commands(commands) self._update_lb_to_ls_association( ovn_lb, subnet_id=member.get(constants.SUBNET_ID), associate=False, update_ls_ref=True) return pool_status else: msg = f"Member {member[constants.ID]} not found in the pool" LOG.warning(msg) def _members_in_subnet(self, ovn_lb, subnet_id): for key, value in ovn_lb.external_ids.items(): if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX): if value and len(value.split(',')) > 0: for m_info in value.split(','): mem_id, mem_ip_port, mem_subnet = m_info.split('_')[1:] if mem_subnet == subnet_id: return True return False def member_delete(self, member): error_deleting_member = False try: pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( member[constants.POOL_ID]) self._remove_member(member, ovn_lb, pool_key) if ovn_lb.health_check: mem_subnet = member[constants.SUBNET_ID] if not self._members_in_subnet(ovn_lb, mem_subnet): # NOTE(froyo): if member is last member from the subnet # we should clean up the ovn-lb-hm-port. # We need to do this call after the cleaning of the # ip_port_mappings for the ovn LB. self._clean_up_hm_port(member[constants.SUBNET_ID]) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of member") error_deleting_member = True self._update_external_ids_member_status( ovn_lb, member[constants.ID], None, delete=True) status = self._get_current_operating_statuses(ovn_lb) status[constants.MEMBERS] = [ {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.DELETED}] if error_deleting_member: status[constants.MEMBERS][0][constants.PROVISIONING_STATUS] = ( constants.ERROR) return status def member_update(self, member): try: error_updating_member = False pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( member[constants.POOL_ID]) member_operating_status = constants.NO_MONITOR last_status = self._find_member_status( ovn_lb, member[constants.ID]) if constants.ADMIN_STATE_UP in member: if member[constants.ADMIN_STATE_UP]: # if HM exists trust on neutron:member_status # as the last status valid for the member if ovn_lb.health_check: # search status of member_uuid member_operating_status = last_status else: member_operating_status = constants.NO_MONITOR else: member_operating_status = constants.OFFLINE self._update_external_ids_member_status( ovn_lb, member[constants.ID], member_operating_status) # NOTE(froyo): If we are toggling from/to OFFLINE due to an # admin_state_up change, in that case we should update vips if ( last_status != constants.OFFLINE and member_operating_status == constants.OFFLINE ) or ( last_status == constants.OFFLINE and member_operating_status != constants.OFFLINE ): commands = [] commands.extend(self._refresh_lb_vips(ovn_lb, ovn_lb.external_ids)) self._execute_commands(commands) except Exception: LOG.exception(ovn_const.EXCEPTION_MSG, "update of member") error_updating_member = True status = self._get_current_operating_statuses(ovn_lb) status[constants.MEMBERS] = [ {constants.ID: member[constants.ID], constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: member_operating_status}] if 
error_updating_member: status[constants.MEMBERS][0][constants.PROVISIONING_STATUS] = ( constants.ERROR) return status def _get_existing_pool_members(self, pool_id): pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id) if not ovn_lb: msg = _("Loadbalancer with pool %s does not exist") % pool_key raise driver_exceptions.DriverError(msg) external_ids = dict(ovn_lb.external_ids) return external_ids[pool_key] def get_pool_member_id(self, pool_id, mem_addr_port=None): '''Gets Member information :param pool_id: ID of the Pool whose member information is reqd. :param mem_addr_port: Combination of Member Address+Port. Default=None :returns: UUID -- ID of the Member if member exists in pool. :returns: None -- if no member exists in the pool :raises: Exception if Loadbalancer is not found for a Pool ID ''' existing_members = self._get_existing_pool_members(pool_id) # Members are saved in OVN in the form of # member1_UUID_IP:Port, member2_UUID_IP:Port # Match the IP:Port for all members with the mem_addr_port # information and return the UUID. for meminf in existing_members.split(','): if mem_addr_port == meminf.split('_')[2]: return meminf.split('_')[1] def _create_neutron_port(self, neutron_client, name, project_id, net_id, subnet_id, address=None): port = {'name': name, 'network_id': net_id, 'fixed_ips': [{'subnet_id': subnet_id}], 'admin_state_up': True, 'project_id': project_id} if address: port['fixed_ips'][0]['ip_address'] = address try: return neutron_client.create_port(**port) except openstack.exceptions.ConflictException as e: # Sometimes the VIP is already created (race-conditions) # Lets get the it from Neutron API. port = self._neutron_find_port( neutron_client, network_id=net_id, name_or_id=f'{name}') if not port: LOG.error('Cannot create/get LoadBalancer VIP port with ' 'fixed IP: %s', address) raise e LOG.debug('VIP Port already exists, uuid: %s', port.id) return port except openstack.exceptions.HttpException as e: raise e def create_vip_port(self, project_id, lb_id, vip_d, additional_vip_dicts=None): neutron_client = clients.get_neutron_client() additional_vip_ports = [] vip_port = None try: vip_port = self._create_neutron_port( neutron_client, f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}', project_id, vip_d.get(constants.VIP_NETWORK_ID), vip_d.get('vip_subnet_id'), vip_d.get(constants.VIP_ADDRESS, None)) if additional_vip_dicts: for index, additional_vip in enumerate(additional_vip_dicts, start=1): additional_vip_ports.append(self._create_neutron_port( neutron_client, f'{ovn_const.LB_VIP_ADDIT_PORT_PREFIX}{index}-{lb_id}', project_id, additional_vip.get(constants.NETWORK_ID), additional_vip.get('subnet_id'), additional_vip.get('ip_address', None))) return vip_port, additional_vip_ports except openstack.exceptions.HttpException as e: # NOTE (froyo): whatever other exception as e.g. Timeout # we should try to ensure no leftover port remains if vip_port: LOG.debug('Leftover port %s has been found. Trying to ' 'delete it', vip_port.id) self.delete_port(vip_port.id) for additional_vip in additional_vip_ports: LOG.debug('Leftover port %s has been found. 
Trying to ' 'delete it', additional_vip.id) self.delete_port(additional_vip.id) raise e @tenacity.retry( retry=tenacity.retry_if_exception_type( openstack.exceptions.HttpException), wait=tenacity.wait_exponential(max=75), stop=tenacity.stop_after_attempt(15), reraise=True) def delete_port(self, port_id): neutron_client = clients.get_neutron_client() try: neutron_client.delete_port(port_id) except openstack.exceptions.ResourceNotFound: LOG.warning("Port %s could not be found. Please " "check Neutron logs. Perhaps port " "was already deleted.", port_id) # NOTE(froyo): This could be removed in some cycles after Bobcat, this # check is created to ensure that LB HC vip field is correctly format like # IP:PORT def _check_lbhc_vip_format(self, vip): if vip: ip_port = vip.rsplit(':', 1) if len(ip_port) == 2 and ip_port[1].isdigit(): return True return False def _get_vip_lbhc(self, lbhc): vip = lbhc.external_ids.get(ovn_const.LB_EXT_IDS_HM_VIP, '') if vip: return vip else: if lbhc.vip: ip_port = lbhc.vip.rsplit(':', 1) if len(ip_port) == 2: return ip_port[0] return '' def handle_vip_fip(self, fip_info): ovn_lb = fip_info['ovn_lb'] additional_vip_fip = fip_info.get('additional_vip_fip', False) external_ids = copy.deepcopy(ovn_lb.external_ids) commands = [] need_ext_set = True need_hc_set = True if fip_info['action'] in ( ovn_const.REQ_INFO_ACTION_ASSOCIATE, ovn_const.REQ_INFO_ACTION_SYNC ): if additional_vip_fip: existing_addi_vip_fip = external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY, []) if existing_addi_vip_fip: existing_addi_vip_fip = existing_addi_vip_fip.split(',') existing_addi_vip_fip.append(fip_info['vip_fip']) external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = ( ','.join(existing_addi_vip_fip)) vip_fip_info = { ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: ','.join(existing_addi_vip_fip)} else: external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = ( fip_info['vip_fip']) vip_fip_info = { ovn_const.LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']} if fip_info['action'] == ovn_const.REQ_INFO_ACTION_SYNC: # Don't need to trigger OVN DB set if external_ids not changed need_ext_set = not all( ovn_lb.external_ids.get(k) == v for k, v in vip_fip_info.items() ) # For sync scenario, check if FIP VIP already in health_check for lb_hc in ovn_lb.health_check: # All lbhc in health_check are already checked # at this stage of sync workflow in hm_purge. # So we should be able to just check health_check. 
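# Worked example of the 'IP:PORT' format check above: rsplit(':', 1) keeps
# bracketed IPv6 addresses intact and lets the port be validated separately.
_hc_vip_ok = '[fd00::10]:80'.rsplit(':', 1)   # ['[fd00::10]', '80'] -> valid
_hc_vip_bad = '10.0.0.10'.rsplit(':', 1)      # ['10.0.0.10'] -> not 'IP:PORT'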
if self._get_vip_lbhc(lb_hc) == fip_info['vip_fip']: need_hc_set = False break if need_ext_set: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ( 'external_ids', vip_fip_info))) if need_hc_set: for lb_hc in ovn_lb.health_check: if self._get_vip_lbhc(lb_hc) in fip_info['vip_related']: vip = fip_info['vip_fip'] lb_hc_external_ids = copy.deepcopy(lb_hc.external_ids) lb_hc_external_ids[ovn_const.LB_EXT_IDS_HM_VIP] = vip if self._check_lbhc_vip_format(lb_hc.vip): port = lb_hc.vip.rsplit(':')[-1] vip += ':' + port else: vip = '' kwargs = { 'vip': vip, 'options': lb_hc.options, 'external_ids': lb_hc_external_ids} with self.ovn_nbdb_api.transaction( check_error=True) as txn: fip_lbhc = txn.add(self.ovn_nbdb_api.db_create( 'Load_Balancer_Health_Check', **kwargs)) txn.add(self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, 'health_check', fip_lbhc)) else: # For disassociate case existing_addi_vip_fip_need_updated = False existing_addi_vip_fip = external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY, []) if existing_addi_vip_fip: existing_addi_vip_fip = existing_addi_vip_fip.split(',') if fip_info['vip_fip'] in existing_addi_vip_fip: existing_addi_vip_fip.remove(fip_info['vip_fip']) existing_addi_vip_fip_need_updated = True if existing_addi_vip_fip_need_updated: if existing_addi_vip_fip: vip_fip_info = { ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: ','.join(existing_addi_vip_fip)} commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', vip_fip_info))) else: external_ids.pop(ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY) commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY))) if fip_info['vip_fip'] == external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY): external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (ovn_const.LB_EXT_IDS_VIP_FIP_KEY))) for lb_hc in ovn_lb.health_check: # FIPs can only be ipv4, so not dealing with ipv6 [] here if self._get_vip_lbhc(lb_hc) == fip_info['vip_fip']: commands.append( self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, 'health_check', lb_hc.uuid)) commands.append(self.ovn_nbdb_api.db_destroy( 'Load_Balancer_Health_Check', lb_hc.uuid)) break commands.extend( self._refresh_lb_vips( ovn_lb, external_ids, is_sync=( fip_info['action'] == ovn_const.REQ_INFO_ACTION_SYNC) ) ) self._execute_commands(commands) def handle_member_dvr(self, info): pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(info['pool_id']) if ((not ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)) and (not ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY))): LOG.debug("LB %(lb)s has no FIP on VIP configured. " "There is no need to centralize member %(member)s " "traffic.", {'lb': ovn_lb.uuid, 'member': info['id']}) return # Find out if member has FIP assigned. neutron_client = clients.get_neutron_client() try: subnet = neutron_client.get_subnet(info['subnet_id']) ls_name = utils.ovn_name(subnet.network_id) except openstack.exceptions.ResourceNotFound: LOG.exception('Subnet %s not found while trying to ' 'fetch its data.', info['subnet_id']) return try: ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name) except idlutils.RowNotFound: LOG.warning("Logical Switch %s not found. 
" "Cannot verify member FIP configuration.", ls_name) return fip = None f = utils.remove_macs_from_lsp_addresses for port in ls.ports: if info['address'] in f(port.addresses): # We found particular port fip = self.ovn_nbdb_api.db_find_rows( 'NAT', ('external_ids', '=', { ovn_const.OVN_FIP_PORT_EXT_ID_KEY: port.name}) ).execute(check_error=True) fip = fip[0] if fip else fip break if not fip: LOG.debug('Member %s has no FIP assigned. ' 'There is no need to modify its NAT.', info['id']) return if info['action'] == ovn_const.REQ_INFO_MEMBER_ADDED: LOG.info('Member %(member)s is added to Load Balancer %(lb)s ' 'and both have FIP assigned. Member FIP %(fip)s ' 'needs to be centralized in those conditions. ' 'Deleting external_mac/logical_port from it.', {'member': info['id'], 'lb': ovn_lb.uuid, 'fip': fip.external_ip}) self.ovn_nbdb_api.db_clear( 'NAT', fip.uuid, 'external_mac').execute(check_error=True) self.ovn_nbdb_api.db_clear( 'NAT', fip.uuid, 'logical_port').execute(check_error=True) else: LOG.info('Member %(member)s is deleted from Load Balancer ' '%(lb)s and both have FIP assigned. Member FIP %(fip)s ' 'can be decentralized now if environment has DVR ' 'enabled. Updating FIP object for recomputation.', {'member': info['id'], 'lb': ovn_lb.uuid, 'fip': fip.external_ip}) # NOTE(mjozefcz): We don't know if this env is DVR or not. # We should call neutron API to do 'empty' update of the FIP. # It will bump revision number and do recomputation of the FIP. try: fip_info = neutron_client.get_ip( fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY]) empty_update = { 'description': fip_info['description']} neutron_client.update_ip( fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY], **empty_update) except openstack.exceptions.ResourceNotFound: LOG.warning('Member %(member)s FIP %(fip)s not found in ' 'Neutron. 
Cannot update it.', {'member': info['id'], 'fip': fip.external_ip}) def get_lsp(self, port_id, network_id): ls_name = utils.ovn_name(network_id) try: ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name) except idlutils.RowNotFound: LOG.warn(f"Logical Switch {ls_name} not found.") return for port in ls.ports: if port_id in port.name: # We found particular port return port def _get_member_lsp(self, member_ip, member_subnet_id): neutron_client = clients.get_neutron_client() try: member_subnet = neutron_client.get_subnet(member_subnet_id) except openstack.exceptions.ResourceNotFound: LOG.exception('Subnet %s not found while trying to ' 'fetch its data.', member_subnet_id) return ls_name = utils.ovn_name(member_subnet.network_id) try: ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name) except idlutils.RowNotFound: LOG.warning("Logical Switch %s not found.", ls_name) return f = utils.remove_macs_from_lsp_addresses for port in ls.ports: if member_ip in f(port.addresses): # We found particular port return port def get_fip_from_vip(self, lb): neutron_client = clients.get_neutron_client() try: return list(neutron_client.ips(port_id=lb.vip_port_id)) except openstack.exceptions.HttpException as e: LOG.warn("Error on fetch fip for " f"{lb.loadbalancer_id} " f"Error: {str(e)}") def _add_lbhc(self, ovn_lb, pool_key, info): hm_id = info[constants.ID] status = {constants.ID: hm_id, constants.PROVISIONING_STATUS: constants.ERROR, constants.OPERATING_STATUS: constants.ERROR} # Example # MONITOR_PRT = 80 # ID=$(ovn-nbctl --bare --column _uuid find # Load_Balancer_Health_Check vip="${LB_VIP_ADDR}\:${MONITOR_PRT}") # In our case the monitor port will be the members protocol port vips = [] if ovn_const.LB_EXT_IDS_VIP_KEY in ovn_lb.external_ids: vips.append(ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)) if ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY in ovn_lb.external_ids: vips.extend(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY).split(',')) fips = [] if ovn_const.LB_EXT_IDS_VIP_FIP_KEY in ovn_lb.external_ids: fips.append(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY)) if ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY in ovn_lb.external_ids: fips.extend(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY).split(',')) if not vips: LOG.error("Could not find VIP for HM %s, LB external_ids: %s", hm_id, ovn_lb.external_ids) return status vip_port = self._get_pool_listener_port(ovn_lb, pool_key) # This is to enable lookups by Octavia DB ID value external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: hm_id, ovn_const.LB_EXT_IDS_HM_POOL_KEY: pool_key[ len(ovn_const.LB_EXT_IDS_POOL_PREFIX):], } operating_status = constants.ONLINE if not info['admin_state_up']: operating_status = constants.OFFLINE options = { 'interval': str(info['interval']), 'timeout': str(info['timeout']), 'success_count': str(info['success_count']), 'failure_count': str(info['failure_count'])} try: with self.ovn_nbdb_api.transaction(check_error=True) as txn: for vip in vips: # Just seems like this needs ovsdbapp support, see: # ovsdbapp/schema/ovn_northbound/impl_idl.py # - lb_add() # ovsdbapp/schema/ovn_northbound/commands.py # - LbAddCommand() # then this could just be self.ovn_nbdb_api.lb_hm_add() external_ids_vip = copy.deepcopy(external_ids) external_ids_vip[ovn_const.LB_EXT_IDS_HM_VIP] = vip if netaddr.IPNetwork(vip).version == n_const.IP_VERSION_6: vip = f'[{vip}]' kwargs = { 'vip': vip + ':' + str(vip_port) if vip_port else '', 'options': options, 'external_ids': external_ids_vip} hms_key = ovn_lb.external_ids.get( 
ovn_const.LB_EXT_IDS_HMS_KEY, []) if hms_key: hms_key = jsonutils.loads(hms_key) health_check = txn.add( self.ovn_nbdb_api.db_create( 'Load_Balancer_Health_Check', **kwargs)) txn.add(self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, 'health_check', health_check)) hms_key.append(hm_id) txn.add(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', {ovn_const.LB_EXT_IDS_HMS_KEY: jsonutils.dumps(hms_key)}))) if fips: external_ids_fip = copy.deepcopy(external_ids) for fip in fips: external_ids_fip[ovn_const.LB_EXT_IDS_HM_VIP] = fip if netaddr.IPNetwork( fip).version == n_const.IP_VERSION_6: fip = f'[{fip}]' fip_kwargs = { 'vip': fip + ':' + str(vip_port) if vip_port else '', 'options': options, 'external_ids': external_ids_fip} fip_health_check = txn.add( self.ovn_nbdb_api.db_create( 'Load_Balancer_Health_Check', **fip_kwargs)) txn.add(self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, 'health_check', fip_health_check)) status = {constants.ID: hm_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: operating_status} except Exception: # Any Exception will return ERROR status LOG.exception(ovn_const.EXCEPTION_MSG, "set of health check") return status def _sync_lbhc(self, ovn_lb, pool_key, hm): hm_id = hm[constants.ID] # Example # MONITOR_PRT = 80 # ID=$(ovn-nbctl --bare --column _uuid find # Load_Balancer_Health_Check vip="${LB_VIP_ADDR}\:${MONITOR_PRT}") # In our case the monitor port will be the members protocol port vips = [] if ovn_const.LB_EXT_IDS_VIP_KEY in ovn_lb.external_ids: vips.append(ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)) if ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY in ovn_lb.external_ids: vips.extend(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY).split(',')) fips = [] if ovn_const.LB_EXT_IDS_VIP_FIP_KEY in ovn_lb.external_ids: fips.append(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY)) if ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY in ovn_lb.external_ids: fips.extend(ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY).split(',')) if not vips: msg = (f"Could not find VIP for HM {hm_id}, LB external_ids: " f"{ovn_lb.external_ids}") raise driver_exceptions.DriverError(msg) vip_port = self._get_pool_listener_port(ovn_lb, pool_key) # This is to enable lookups by Octavia DB ID value external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: hm_id, ovn_const.LB_EXT_IDS_HM_POOL_KEY: pool_key[ len(ovn_const.LB_EXT_IDS_POOL_PREFIX):], } options = { 'interval': str(hm['interval']), 'timeout': str(hm['timeout']), 'success_count': str(hm['success_count']), 'failure_count': str(hm['failure_count'])} try: with self.ovn_nbdb_api.transaction(check_error=True) as txn: for vip in vips: recreate = False # Just seems like this needs ovsdbapp support, see: # ovsdbapp/schema/ovn_northbound/impl_idl.py # - lb_add() # ovsdbapp/schema/ovn_northbound/commands.py # - LbAddCommand() # then this could just be self.ovn_nbdb_api.lb_hm_add() external_ids_vip = copy.deepcopy(external_ids) external_ids_vip[ovn_const.LB_EXT_IDS_HM_VIP] = vip if netaddr.IPNetwork(vip).version == n_const.IP_VERSION_6: vip = f'[{vip}]' kwargs = { 'vip': vip + ':' + str(vip_port) if vip_port else '', 'options': options, 'external_ids': external_ids_vip} hms_key = ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_HMS_KEY, []) if hms_key: hms_key = jsonutils.loads(hms_key) lbhcs, _ = self._find_ovn_lb_from_hm_id(hm_id) if not lbhcs: recreate = True for lbhc in lbhcs: commands = [] if lbhc.vip != kwargs.get('vip'): commands.append( self.ovn_nbdb_api.db_set( 
'Load_Balancer_Health_Check', lbhc.uuid, ('vip', kwargs.get('vip')))) if lbhc.options != options: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('options', options))) if lbhc.external_ids != external_ids_vip: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('external_ids', external_ids_vip))) found_in_exist = False for hc in ovn_lb.health_check: if str(hc.uuid) == str(lbhc.uuid): found_in_exist = True break if not found_in_exist: commands.append( self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, ('health_check', lbhc.uuid))) if hm_id not in hms_key: hms_key.append(hm_id) commands.append(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', {ovn_const.LB_EXT_IDS_HMS_KEY: jsonutils.dumps(hms_key)}))) self._execute_commands(commands) if recreate: health_check = txn.add( self.ovn_nbdb_api.db_create( 'Load_Balancer_Health_Check', **kwargs)) txn.add(self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', health_check, ('vip', kwargs.get('vip')))) txn.add(self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, 'health_check', health_check)) if hm_id not in hms_key: hms_key.append(hm_id) txn.add(self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', {ovn_const.LB_EXT_IDS_HMS_KEY: jsonutils.dumps(hms_key)}))) if fips: external_ids_fip = copy.deepcopy(external_ids) for fip in fips: recreate = False external_ids_fip[ovn_const.LB_EXT_IDS_HM_VIP] = fip if netaddr.IPNetwork( fip).version == n_const.IP_VERSION_6: fip = f'[{fip}]' fip_kwargs = { 'vip': fip + ':' + str(vip_port) if vip_port else '', 'options': options, 'external_ids': external_ids_fip} lbhcs, _ = self._find_ovn_lb_from_hm_id(hm_id) if not lbhcs: recreate = True for lbhc in lbhcs: commands = [] if lbhc.vip != fip_kwargs['vip']: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('vip', fip_kwargs['vip']))) if lbhc.options != options: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('options', options))) if lbhc.external_ids != external_ids_vip: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('external_ids', external_ids_fip))) self._execute_commands(commands) if recreate: fip_health_check = txn.add( self.ovn_nbdb_api.db_create( 'Load_Balancer_Health_Check', **fip_kwargs)) txn.add(self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', fip_health_check, ('vip', fip_kwargs['vip']))) txn.add(self.ovn_nbdb_api.db_add( 'Load_Balancer', ovn_lb.uuid, 'health_check', fip_health_check)) except Exception as e: msg = (f"Error syncing Load Balancer Health Check: {e}") raise driver_exceptions.DriverError(msg) def _update_lbhc_vip_port(self, lbhc, vip_port): if lbhc.vip: vip = lbhc.vip.rsplit(":")[0] + ':' + str(vip_port) else: # If initially the lbhc was created with no port info, vip field # will be empty, so get it from lbhc external_ids vip = lbhc.external_ids.get(ovn_const.LB_EXT_IDS_HM_VIP, '') if vip: if netaddr.IPNetwork(vip).version == n_const.IP_VERSION_6: vip = f'[{vip}]' vip = vip + ':' + str(vip_port) commands = [] commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('vip', vip))) self._execute_commands(commands) return True def _update_ip_port_mappings( self, ovn_lb, backend_ip, port_name, src_ip, pool_key, delete=False): # ip_port_mappings:${MEMBER_IP}=${LSP_NAME_MEMBER}:${HEALTH_SRC} # where: # MEMBER_IP: IP of member_lsp # LSP_NAME_MEMBER: Logical switch port # HEALTH_SRC: source IP of 
hm_port if delete: # Before removing a member from ip_port_mappings, # make sure no other # pool uses the same member. other_members = [] for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_POOL_PREFIX in k and k != pool_key: other_members.extend(self._extract_member_info( ovn_lb.external_ids[k])) member_statuses = ovn_lb.external_ids.get( ovn_const.OVN_MEMBER_STATUS_KEY) try: member_statuses = jsonutils.loads(member_statuses) except TypeError: LOG.debug("No member status in external_ids: %s", str(member_statuses)) member_statuses = {} execute_delete = True for member_id in [item[3] for item in other_members if item[0] == backend_ip]: if member_statuses.get(member_id, '') != constants.NO_MONITOR: execute_delete = False LOG.debug( f"Backend {backend_ip} still in use by member" f" {member_id}, " f"so it won't be removed" ) break if execute_delete: LOG.debug(f"Removing ip_port_mapping for {backend_ip}") self.ovn_nbdb_api.lb_del_ip_port_mapping( ovn_lb.uuid, backend_ip).execute() else: self.ovn_nbdb_api.lb_add_ip_port_mapping(ovn_lb.uuid, backend_ip, port_name, src_ip).execute() def _clean_ip_port_mappings(self, ovn_lb, pool_key=None): if not pool_key: self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid, 'ip_port_mappings').execute() else: # NOTE(froyo): before removing a member from the ip_port_mappings # list, we need to ensure that the member is not being monitored by # any other existing HM. To prevent accidentally removing the # member we can use the neutron:member_status to search for any # other members with the same address members_try_remove = self._extract_member_info( ovn_lb.external_ids[pool_key]) other_members = [] for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_POOL_PREFIX in k and k != pool_key: other_members.extend(self._extract_member_info( ovn_lb.external_ids[k])) member_statuses = ovn_lb.external_ids.get( ovn_const.OVN_MEMBER_STATUS_KEY) try: member_statuses = jsonutils.loads(member_statuses) except TypeError: LOG.debug("no member status on external_ids: %s", str(member_statuses)) member_statuses = {} for (mb_ip, mb_port, mb_subnet, mb_id) in members_try_remove: delete = True for member_id in [item[3] for item in other_members if item[0] == mb_ip]: if member_statuses.get( member_id, '') != constants.NO_MONITOR: # same address being monitorized by another HM delete = False if delete: self.ovn_nbdb_api.lb_del_ip_port_mapping( ovn_lb.uuid, mb_ip).execute() def _update_hm_member(self, ovn_lb, pool_key, backend_ip, delete=False): # Update just the backend_ip member for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info( ovn_lb.external_ids[pool_key]): if mb_ip == backend_ip: member_lsp = self._get_member_lsp(mb_ip, mb_subnet) if not member_lsp: # No port found for the member backend IP, we can determine # that the port doesn't exists or a typo on creation of the # member, anyway put the member inmediatelly as ERROR LOG.error("Member %(member)s Logical_Switch_Port not " "found, when creating a Health Monitor for " "pool %(pool)s.", {'member': mb_ip, 'pool': pool_key}) return constants.ERROR network_id = member_lsp.external_ids.get( ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1] project_id = member_lsp.external_ids.get( ovn_const.OVN_PROJECT_EXT_ID_KEY) hm_port = self._ensure_hm_ovn_port( network_id, mb_subnet, project_id) if not hm_port: LOG.error("No port on network %(network)s available for " "health monitoring. 
Cannot find a Health " "Monitor for pool %(pool)s.", {'network': network_id, 'pool': pool_key}) return None hm_source_ip = None for fixed_ip in hm_port['fixed_ips']: if fixed_ip['subnet_id'] == mb_subnet: hm_source_ip = fixed_ip['ip_address'] break if not hm_source_ip: LOG.error("No port on subnet %(subnet)s available for " "health monitoring member IP %(member)s. Cannot " "find a Health Monitor for pool %(pool)s.", {'subnet': mb_subnet, 'member': mb_ip, 'pool': pool_key}) return None self._update_ip_port_mappings(ovn_lb, backend_ip, member_lsp.name, hm_source_ip, pool_key, delete) return constants.ONLINE # NOTE(froyo): If the backend is not located return constants.ERROR def _lookup_lbhcs_by_hm_id(self, hm_id): lbhc_rows = self.ovn_nbdb_api.db_list_rows( 'Load_Balancer_Health_Check').execute(check_error=True) lbhcs = [] for lbhc in lbhc_rows: if (ovn_const.LB_EXT_IDS_HM_KEY in lbhc.external_ids and lbhc.external_ids[ovn_const.LB_EXT_IDS_HM_KEY] == hm_id): lbhcs.append(lbhc) if lbhcs: return lbhcs raise idlutils.RowNotFound(table='Load_Balancer_Health_Check', col='external_ids', match=hm_id) def _find_ovn_lb_from_hm_id(self, hm_id, lbhc_vip=None): lbs = self.ovn_nbdb_api.db_list_rows( 'Load_Balancer').execute(check_error=True) ovn_lb = None for lb in lbs: if (ovn_const.LB_EXT_IDS_HMS_KEY in lb.external_ids.keys() and hm_id in lb.external_ids[ovn_const.LB_EXT_IDS_HMS_KEY]): ovn_lb = lb break try: lbhcs_by_hm_id = self._lookup_lbhcs_by_hm_id(hm_id) if lbhc_vip: lbhcs = [] for lbhc in lbhcs_by_hm_id: if lbhc.vip == lbhc_vip: lbhcs.append(lbhc) else: lbhcs = lbhcs_by_hm_id except idlutils.RowNotFound: LOG.debug("Loadbalancer health check %s not found!", hm_id) return [], ovn_lb return lbhcs, ovn_lb def hm_create(self, info): status = { constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: constants.NO_MONITOR, constants.PROVISIONING_STATUS: constants.ERROR}]} pool_id = info[constants.POOL_ID] pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id) if not ovn_lb: LOG.debug("Could not find LB with pool id %s", pool_id) return status status[constants.LOADBALANCERS] = [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}] if pool_key not in ovn_lb.external_ids: # Returning early here will cause the pool to go into # PENDING_UPDATE state, which is not good LOG.error("Could not find pool with key %s, LB external_ids: %s", pool_key, ovn_lb.external_ids) status[constants.POOLS] = [ {constants.ID: pool_id, constants.OPERATING_STATUS: constants.OFFLINE}] return status status[constants.POOLS] = [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: constants.ONLINE}] pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) status[constants.LISTENERS] = [] for listener in pool_listeners: status[constants.LISTENERS].append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: constants.ONLINE}) # Update status for all members in the pool member_status = self._update_member_statuses(ovn_lb, pool_id, constants.ACTIVE, constants.ONLINE) status[constants.MEMBERS] = member_status # MONITOR_PRT = 80 # ovn-nbctl --wait=sb -- --id=@hc create Load_Balancer_Health_Check # vip="${LB_VIP_ADDR}\:${MONITOR_PRT}" -- add Load_Balancer # ${OVN_LB_ID} health_check @hc # options here are interval, timeout, failure_count and success_count # from info object passed-in hm_status = self._add_lbhc(ovn_lb, pool_key, info) if hm_status[constants.PROVISIONING_STATUS] == 
constants.ACTIVE: for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info( ovn_lb.external_ids[pool_key]): mb_status = self._update_hm_member(ovn_lb, pool_key, mb_ip) if not mb_status: hm_status[constants.PROVISIONING_STATUS] = constants.ERROR hm_status[constants.OPERATING_STATUS] = constants.ERROR self._clean_ip_port_mappings(ovn_lb, pool_key) break self._update_external_ids_member_status( ovn_lb, mb_id, mb_status) else: status = self._get_current_operating_statuses(ovn_lb) status[constants.HEALTHMONITORS] = [hm_status] return status def hm_sync(self, hm, ovn_lb, pool_key): """Sync Health Monitor object with an OVN LoadBalancer The method performs the following steps: 1. Create Health Monitor in OVN NB in case we don't find load_balancer_health_checks entries associated 2. If we found load_balancer_health_checks entries associated 2.1. Update member status affected on OVN loadbalancer external_ids 2.2. Sync OVN load_balancer_health_checks entries 2.3. Update OVN Loadbalancer ip_port_mappings 2.4. Update OVN Loadbalancer member_status info on external_ids :param hm: The source health monitor object from Octavia DB :param ovn_lb: The OVN LoadBalancer object that needs to be sync :param pool_key: The pool_key where health monitor is associated """ lbhcs, ovn_lb = self._find_ovn_lb_from_hm_id(hm[constants.ID]) if not lbhcs: LOG.debug("Loadbalancer health check %s not found!", hm[constants.ID]) # Create in case we don't found it self.hm_create(hm) return pool_id = hm[constants.POOL_ID] self._update_member_statuses(ovn_lb, pool_id, constants.ACTIVE, constants.ONLINE) try: self._sync_lbhc(ovn_lb, pool_key, hm) except Exception as e: LOG.exception(f"Failed syncing Load Balancer Health Monitor: {e}") for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info( ovn_lb.external_ids[pool_key]): mb_status = self._update_hm_member(ovn_lb, pool_key, mb_ip) if not mb_status: self._clean_ip_port_mappings(ovn_lb, pool_key) break self._update_external_ids_member_status(ovn_lb, mb_id, mb_status) def hm_update(self, info): status = { constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: constants.ERROR, constants.PROVISIONING_STATUS: constants.ERROR}]} hm_id = info[constants.ID] pool_id = info[constants.POOL_ID] lbhcs, ovn_lb = self._find_ovn_lb_from_hm_id(hm_id) if not lbhcs: LOG.debug("Loadbalancer health check %s not found!", hm_id) return status if not ovn_lb: LOG.debug("Could not find LB with health monitor id %s", hm_id) # Do we really need to try this hard? 
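# Fallback: the LB row was not tagged with this HM id in its external_ids,
# so try to resolve the LB through the pool id instead.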
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id) if not ovn_lb: LOG.debug("Could not find LB with pool id %s", pool_id) return status options = {} if info['interval']: options['interval'] = str(info['interval']) if info['timeout']: options['timeout'] = str(info['timeout']) if info['success_count']: options['success_count'] = str(info['success_count']) if info['failure_count']: options['failure_count'] = str(info['failure_count']) commands = [] for lbhc in lbhcs: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer_Health_Check', lbhc.uuid, ('options', options))) self._execute_commands(commands) operating_status = constants.ONLINE if not info['admin_state_up']: operating_status = constants.OFFLINE status = { constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.POOLS: [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: operating_status, constants.PROVISIONING_STATUS: constants.ACTIVE}]} return status def hm_delete(self, info): hm_id = info[constants.ID] pool_id = info[constants.POOL_ID] status = { constants.HEALTHMONITORS: [ {constants.ID: hm_id, constants.OPERATING_STATUS: constants.NO_MONITOR, constants.PROVISIONING_STATUS: constants.DELETED}]} lbhcs, ovn_lb = self._find_ovn_lb_from_hm_id(hm_id) if not lbhcs or not ovn_lb: LOG.debug("Loadbalancer Health Check associated to Health Monitor " "%s not found in OVN Northbound DB. Setting the " "Loadbalancer Health Monitor status to DELETED in " "Octavia", hm_id) return status # Need to send pool info in status update to avoid immutable objects, # the LB should have this info. Also in order to delete the hm port # used for health checks we need to get all subnets from the members # on the pool pool_listeners = [] member_subnets = [] for k, v in ovn_lb.external_ids.items(): if self._get_pool_key(pool_id) == k: members = self._extract_member_info(ovn_lb.external_ids[k]) member_subnets = list( set([mb_subnet for (mb_ip, mb_port, mb_subnet, mb_id) in members]) ) pool_listeners = self._get_pool_listeners( ovn_lb, self._get_pool_key(pool_id)) break # ovn-nbctl clear load_balancer ${OVN_LB_ID} ip_port_mappings # ovn-nbctl clear load_balancer ${OVN_LB_ID} health_check # TODO(haleyb) remove just the ip_port_mappings for this hm hms_key = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_HMS_KEY, []) # Update status for members in the pool related to HM member_status = self._update_member_statuses(ovn_lb, pool_id, constants.ACTIVE, constants.NO_MONITOR) if hms_key: hms_key = jsonutils.loads(hms_key) if hm_id in hms_key: hms_key.remove(hm_id) self._clean_ip_port_mappings(ovn_lb, ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id)) commands = [] for lbhc in lbhcs: commands.append( self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, 'health_check', lbhc.uuid)) commands.append( self.ovn_nbdb_api.db_destroy('Load_Balancer_Health_Check', lbhc.uuid)) if hms_key: commands.append( self.ovn_nbdb_api.db_set( 'Load_Balancer', ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_HMS_KEY: jsonutils.dumps(hms_key)}))) else: commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'external_ids', (ovn_const.LB_EXT_IDS_HMS_KEY))) self._execute_commands(commands) # Delete the hm port if not in use by other health monitors for subnet in member_subnets: self._clean_up_hm_port(subnet) status = { constants.LOADBALANCERS: [ {constants.ID: ovn_lb.name, 
constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.POOLS: [ {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE}], constants.HEALTHMONITORS: [ {constants.ID: info[constants.ID], constants.OPERATING_STATUS: constants.NO_MONITOR, constants.PROVISIONING_STATUS: constants.DELETED}]} if member_status: status[constants.MEMBERS] = member_status status[constants.LISTENERS] = [] for listener in pool_listeners: status[constants.LISTENERS].append( {constants.ID: listener, constants.PROVISIONING_STATUS: constants.ACTIVE}) return status def _get_lbs_on_hm_event(self, row): """Get the Load Balancer information on a health_monitor event This function is called when the status of a member has been updated. As no duplicate entries are created on a same member for different LBs we will search all LBs affected by the member reported in the health check event Input: Service Monitor row which is coming from ServiceMonitorUpdateEvent. Output: Rows from load_balancer table table matching the member for which the event was generated. Exception: RowNotFound exception can be generated. """ # ip_port_mappings: {"MEMBER_IP"="LSP_NAME_MEMBER:HEALTH_SRC"} # There could be more than one entry in ip_port_mappings! mappings = {} hm_source_ip = str(row.src_ip) member_ip = str(row.ip) member_src = f'{row.logical_port}:' if netaddr.IPNetwork(hm_source_ip).version == n_const.IP_VERSION_6: member_src += f'[{hm_source_ip}]' else: member_src += f'{hm_source_ip}' if netaddr.IPNetwork(member_ip).version == n_const.IP_VERSION_6: member_ip = f'[{member_ip}]' mappings[member_ip] = member_src lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', ('ip_port_mappings', '=', mappings), ('protocol', '=', row.protocol[0])).execute() return lbs if lbs else None def sm_update_event_handler(self, row, sm_delete_event=False): # NOTE(froyo): When a delete event is triggered, the Service_Monitor # deleted row will include the last valid information, e.g. when the # port is directly removed from the VM, the status will be 'online', # in order to protect from this behaviour, we will set manually the # status to 'offline' if sm_delete_event is reported as True. 
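# The request queued below has this shape (values illustrative):
#   {'type': REQ_TYPE_HM_UPDATE_EVENT,
#    'info': {'ovn_lbs': [...], 'ip': <member IP>, 'port': <member port>,
#             'status': <row status, or 'offline' on a delete event>}}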
try: ovn_lbs = self._get_lbs_on_hm_event(row) except idlutils.RowNotFound: LOG.debug("Load balancer information not found") return if not ovn_lbs: LOG.debug("Load balancer not found") return request_info = { "ovn_lbs": ovn_lbs, "ip": row.ip, "port": str(row.port), "status": row.status if not sm_delete_event else ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE, } self.add_request({'type': ovn_const.REQ_TYPE_HM_UPDATE_EVENT, 'info': request_info}) def _get_current_operating_statuses(self, ovn_lb): # NOTE (froyo) We would base all logic in the external_ids field # 'neutron:member_status' that should include all LB member status # in order to calculate the global LB status (listeners, pools, members # included) status = { constants.LOADBALANCERS: [], constants.LISTENERS: [], constants.POOLS: [], constants.MEMBERS: [] } listeners = {} pools = {} member_statuses = ovn_lb.external_ids.get( ovn_const.OVN_MEMBER_STATUS_KEY) try: member_statuses = jsonutils.loads(member_statuses) except TypeError: LOG.debug("no member status on external_ids: %s", str(member_statuses)) member_statuses = {} for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_LISTENER_PREFIX in k: listeners[k.split('_')[1]] = [ x.split('_')[1] for x in v.split(',') if ovn_const.LB_EXT_IDS_POOL_PREFIX in x] continue if ovn_const.LB_EXT_IDS_POOL_PREFIX in k: pools[k.split('_')[1]] = [ x.split('_')[1] for x in v.split(',') if x] continue for member_id, member_status in member_statuses.items(): status[constants.MEMBERS].append({ constants.ID: member_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: member_status}) # get pool statuses for pool_id, members in pools.items(): for i, member in enumerate(members): if member in member_statuses: members[i] = member_statuses[member] else: # if we don't have local info we assume best option members[i] = constants.ONLINE _pool = self._octavia_driver_lib.get_pool(pool_id) if not _pool.admin_state_up or not member_statuses: pools[pool_id] = constants.OFFLINE elif pools[pool_id] and all(constants.ERROR == member_status for member_status in pools[pool_id]): pools[pool_id] = constants.ERROR elif pools[pool_id] and any(constants.ERROR == member_status for member_status in pools[pool_id]): pools[pool_id] = constants.DEGRADED else: pools[pool_id] = constants.ONLINE status[constants.POOLS].append( {constants.ID: pool_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: pools[pool_id]}) # get listener statuses for listener_id, listener_pools in listeners.items(): for i, pool in enumerate(listener_pools): if pool in pools: listener_pools[i] = pools[pool] else: # if we don't have local info we assume best option listener_pools[i] = constants.ONLINE _listener = self._octavia_driver_lib.get_listener(listener_id) if not _listener.admin_state_up: listeners[listener_id] = constants.OFFLINE elif any(constants.ERROR == pool_status for pool_status in listeners[listener_id]): listeners[listener_id] = constants.ERROR elif any(constants.DEGRADED == pool_status for pool_status in listeners[listener_id]): listeners[listener_id] = constants.DEGRADED else: listeners[listener_id] = constants.ONLINE status[constants.LISTENERS].append( {constants.ID: listener_id, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: listeners[listener_id]}) # get LB status lb_status = constants.ONLINE _lb = self._octavia_driver_lib.get_loadbalancer(ovn_lb.name) if not _lb.admin_state_up: lb_status = constants.OFFLINE elif any(constants.ERROR == status for status 
in listeners.values()): lb_status = constants.ERROR elif any(constants.DEGRADED == status for status in listeners.values()): lb_status = constants.DEGRADED status[constants.LOADBALANCERS].append({ constants.ID: ovn_lb.name, constants.PROVISIONING_STATUS: constants.ACTIVE, constants.OPERATING_STATUS: lb_status}) return status def hm_update_event(self, info): ovn_lbs = info['ovn_lbs'] statuses = [] for ovn_lb in ovn_lbs: # Lookup member member_id = None for k, v in ovn_lb.external_ids.items(): if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k: continue for ( mb_ip, mb_port, mb_subnet, mb_id, ) in self._extract_member_info(v): if info['ip'] != mb_ip: continue if info['port'] != mb_port: continue # match member_id = [mb.split('_')[1] for mb in v.split(',') if mb_ip in mb and mb_port in mb][0] break # found it in inner loop if member_id: break if not member_id: LOG.warning('Member for event not found, info: %s', info) else: member_status = constants.ONLINE if info['status'] == ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE: member_status = constants.ERROR self._update_external_ids_member_status(ovn_lb, member_id, member_status) statuses.append(self._get_current_operating_statuses(ovn_lb)) if not statuses: return status = {} for status_lb in statuses: for k in status_lb.keys(): if k not in status: status[k] = [] status[k].extend(status_lb[k]) return status def hm_purge(self, lb_id): # remove redundant hm if any, if no # ovn_const.LB_EXT_IDS_HMS_KEY from lb matches, # this hm has no used and should already got replaced. ovn_lbs = [] try: ovn_lbs = self._find_ovn_lbs_with_retry(lb_id) except idlutils.RowNotFound: LOG.debug(f"OVN loadbalancer {lb_id} not found.") fetch_hc_ids = [] for ovn_lb in ovn_lbs: hm_ids = ovn_lb.external_ids.get( ovn_const.LB_EXT_IDS_HMS_KEY, []) if hm_ids: hm_ids = jsonutils.loads(hm_ids) for hm_id in hm_ids: lbhcs = [] try: lbhcs = self._lookup_lbhcs_by_hm_id(hm_id) except idlutils.RowNotFound: continue fetch_hc_ids.extend([str(lbhc.uuid) for lbhc in lbhcs]) for lbhc in ovn_lb.health_check: if str(lbhc.uuid) not in fetch_hc_ids: commands = [] commands.append( self.ovn_nbdb_api.db_remove( 'Load_Balancer', ovn_lb.uuid, 'health_check', lbhc.uuid)) commands.append( self.ovn_nbdb_api.db_destroy( 'Load_Balancer_Health_Check', lbhc.uuid)) try: self._execute_commands(commands) except idlutils.RowNotFound: LOG.debug("health check not found for purge.") ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/i18n.py0000664000175100017510000000140315033037524023350 0ustar00mylesmyles# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
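# Translation setup for this package; the "_" helper defined below is
# imported by other modules to mark user-facing strings for translation.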
import oslo_i18n as i18n _translators = i18n.TranslatorFactory(domain='octavia') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/maintenance.py0000664000175100017510000001631015033037524025056 0ustar00mylesmyles# Copyright 2023 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import threading from futurist import periodics import netaddr from neutron_lib import constants as n_const from oslo_config import cfg from oslo_log import log as logging from ovsdbapp.backend.ovs_idl import connection from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.ovsdb import impl_idl_ovn CONF = cfg.CONF # Gets Octavia Conf as it runs under o-api domain LOG = logging.getLogger(__name__) class MaintenanceThread(object): def __init__(self): self._callables = [] self._thread = None self._worker = None def add_periodics(self, obj): for name, member in inspect.getmembers(obj): if periodics.is_periodic(member): LOG.info('Periodic task found: %(owner)s.%(member)s', {'owner': obj.__class__.__name__, 'member': name}) self._callables.append((member, (), {})) def start(self): if self._thread is None: self._worker = periodics.PeriodicWorker(self._callables) self._thread = threading.Thread(target=self._worker.start) self._thread.daemon = True self._thread.start() def stop(self): self._worker.stop() self._worker.wait() self._thread.join() self._worker = self._thread = None class DBInconsistenciesPeriodics(object): def __init__(self): self.ovn_nbdb = impl_idl_ovn.OvnNbIdlForLb() c = connection.Connection(self.ovn_nbdb, ovn_conf.get_ovn_ovsdb_timeout()) self.ovn_nbdb_api = impl_idl_ovn.OvsdbNbOvnIdl(c) @periodics.periodic(spacing=600, run_immediately=True) def change_device_owner_lb_hm_ports(self): """Change the device_owner for the OVN LB HM port existing. The OVN LB HM port used for send the health checks to the backend members has a new device_owner, it will use the value onv-lb-hm:distributed in order to keep the behaviour on Neutron as a LOCALPORT. Also this change will add device-id as ovn-lb-hm:{subnet} to get more robust. 
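For example (illustrative): a port whose name starts with 'ovn-lb-hm' gets
its device_owner set to 'ovn-lb-hm:distributed' and its device_id set to
the port name, and the change is rolled back if the corresponding OVN NB
Logical Switch Port does not end up as a localport.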
""" LOG.debug('Maintenance task: checking device_owner for OVN LB HM ' 'ports.') neutron_client = clients.get_neutron_client() ovn_lb_hm_ports = neutron_client.ports( device_owner=n_const.DEVICE_OWNER_DISTRIBUTED) check_neutron_support_new_device_owner = True for port in ovn_lb_hm_ports: if port.name.startswith('ovn-lb-hm'): LOG.debug('Maintenance task: updating device_owner and ' 'adding device_id for port id %s', port.id) neutron_client.update_port( port.id, device_owner=ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, device_id=port.name) # NOTE(froyo): Check that the port is now of type LOCALPORT in # the OVN NB DB or perform a rollback in other cases. Such # cases could indicate that Neutron is in the process of being # updated or that the user has forgotten to update Neutron to a # version that supports this change if check_neutron_support_new_device_owner: port_ovn = self.ovn_nbdb_api.db_find_rows( "Logical_Switch_Port", ("name", "=", port.id)).execute( check_error=True) if len(port_ovn) and port_ovn[0].type != 'localport': LOG.debug('Maintenance task: port %s updated but ' 'looks like Neutron does not support this ' 'new device_owner, or maybe is updating ' 'version, so restoring to old values and ' 'waiting another iteration of this task', port.id) neutron_client.update_port( port.id, device_owner=n_const.DEVICE_OWNER_DISTRIBUTED, device_id='') # Break the loop as do not make sense change the rest break check_neutron_support_new_device_owner = False else: # NOTE(froyo): No ports found to update, or all of them done. LOG.debug('Maintenance task: no more ports left, stopping the ' 'periodic task.') raise periodics.NeverAgain() LOG.debug('Maintenance task: device_owner and device_id checked for ' 'OVN LB HM ports.') # TODO(froyo): Remove this in the Caracal+4 cycle @periodics.periodic(spacing=600, run_immediately=True) def format_ip_port_mappings_ipv6(self): """Give correct format to `ip_port_mappings` for IPv6 backend members. The `ip_port_mappings` field for OVN LBs should be a dictionary with keys following the format: `${MEMBER_IP}=${LSP_NAME_MEMBER}:${HEALTH_SRC_IP}`. However, when `MEMBER_IP` and `HEALTH_SRC_IP` are IPv6 addresses, they should be enclosed in `[]`. 
""" LOG.debug('Maintenance task: Ensure correct formatting of ' 'ip_port_mappings for IPv6 backend members.') ovn_lbs = self.ovn_nbdb_api.db_find_rows( 'Load_Balancer', ('ip_port_mappings', '!=', {})).execute() for lb in ovn_lbs: mappings = {} for k, v in lb.ip_port_mappings.items(): try: # If first element is IPv4 (mixing IPv4 and IPv6 not # allowed) or get AddrFormatError (IPv6 already fixed) we # can jump to next item if netaddr.IPNetwork(k).version == n_const.IP_VERSION_4: break except netaddr.AddrFormatError: break port_uuid, src_ip = v.split(':', 1) mappings[f'[{k}]'] = f'{port_uuid}:[{src_ip}]' if mappings: self.ovn_nbdb_api.db_clear( 'Load_Balancer', lb.uuid, 'ip_port_mappings' ).execute(check_error=True) self.ovn_nbdb_api.db_set( 'Load_Balancer', lb.uuid, ('ip_port_mappings', mappings) ).execute(check_error=True) LOG.debug('Maintenance task: no more ip_port_mappings to format, ' 'stopping the periodic task.') raise periodics.NeverAgain() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5159845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/ovsdb/0000775000175100017510000000000015033037526023340 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/ovsdb/impl_idl_ovn.py0000664000175100017510000002423315033037524026367 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import contextlib import netaddr from neutron_lib import constants as n_const from neutron_lib import exceptions as n_exc from oslo_log import log from ovsdbapp.backend import ovs_idl from ovsdbapp.backend.ovs_idl import command from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.backend.ovs_idl import rowview from ovsdbapp.backend.ovs_idl import transaction as idl_trans from ovsdbapp.schema.ovn_northbound import impl_idl as nb_impl_idl from ovsdbapp.schema.ovn_southbound import impl_idl as sb_impl_idl import tenacity from ovn_octavia_provider.common import config from ovn_octavia_provider.common import exceptions as ovn_exc from ovn_octavia_provider.common import utils from ovn_octavia_provider.i18n import _ from ovn_octavia_provider.ovsdb import ovsdb_monitor LOG = log.getLogger(__name__) class OvnNbTransaction(idl_trans.Transaction): def __init__(self, *args, **kwargs): # NOTE(lucasagomes): The bump_nb_cfg parameter is only used by # the agents health status check self.bump_nb_cfg = kwargs.pop('bump_nb_cfg', False) super().__init__(*args, **kwargs) def pre_commit(self, txn): if not self.bump_nb_cfg: return self.api.nb_global.increment('nb_cfg') class Backend(ovs_idl.Backend): def is_table_present(self, table_name): return table_name in self._tables def is_col_present(self, table_name, col_name): return self.is_table_present(table_name) and ( col_name in self._tables[table_name].columns) # Check for a column match in the table. If not found do a retry with # a stop delay of 10 secs. 
This function would be useful if the caller # wants to verify for the presence of a particular row in the table # with the column match before doing any transaction. # Eg. We can check if Logical_Switch row is present before adding a # logical switch port to it. @tenacity.retry(retry=tenacity.retry_if_exception_type(RuntimeError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def check_for_row_by_value_and_retry(self, table, column, match): try: idlutils.row_by_value(self.idl, table, column, match) except idlutils.RowNotFound as e: msg = (_("%(match)s does not exist in %(column)s of %(table)s") % {'match': match, 'column': column, 'table': table}) raise RuntimeError(msg) from e class OvsdbConnectionUnavailable(n_exc.ServiceUnavailable): message = _("OVS database connection to %(db_schema)s failed with error: " "'%(error)s'. Verify that the OVS and OVN services are " "available and that the 'ovn_nb_connection' and " "'ovn_sb_connection' configuration options are correct.") class FindLbInTableCommand(command.ReadOnlyCommand): def __init__(self, api, lb, table): super().__init__(api) self.lb = lb self.table = table def run_idl(self, txn): self.result = [ rowview.RowView(item) for item in self.api.tables[self.table].rows.values() if self.lb in item.load_balancer] class GetLrsCommand(command.ReadOnlyCommand): def run_idl(self, txn): self.result = [ rowview.RowView(item) for item in self.api.tables['Logical_Router'].rows.values()] # NOTE(froyo): remove this class once ovsdbapp manages the IPv6 into [ ] # https://bugs.launchpad.net/ovsdbapp/+bug/2057471 class DelBackendFromIPPortMapping(command.BaseCommand): table = 'Load_Balancer' def __init__(self, api, lb, backend_ip): super().__init__(api) self.lb = lb if netaddr.IPNetwork(backend_ip).version == n_const.IP_VERSION_6: self.backend_ip = f'[{backend_ip}]' else: self.backend_ip = backend_ip def run_idl(self, txn): try: ovn_lb = self.api.lookup(self.table, self.lb) ovn_lb.delkey('ip_port_mappings', self.backend_ip) except Exception: LOG.exception("Error deleting backend %s from ip_port_mappings " "for LB uuid %s", str(self.backend_ip), str(self.lb)) # NOTE(froyo): remove this class once ovsdbapp manages the IPv6 into [ ] # https://bugs.launchpad.net/ovsdbapp/+bug/2057471 class AddBackendToIPPortMapping(command.BaseCommand): table = 'Load_Balancer' def __init__(self, api, lb, backend_ip, port_name, src_ip): super().__init__(api) self.lb = lb self.backend_ip = backend_ip self.port_name = port_name self.src_ip = src_ip if netaddr.IPNetwork(backend_ip).version == n_const.IP_VERSION_6: self.backend_ip = f'[{backend_ip}]' self.src_ip = f'[{src_ip}]' def run_idl(self, txn): try: lb = self.api.lookup(self.table, self.lb) lb.setkey('ip_port_mappings', self.backend_ip, '%s:%s' % (self.port_name, self.src_ip)) except Exception: LOG.exception("Error adding backend %s to ip_port_mappings " "for LB uuid %s", str(self.backend_ip), str(self.lb)) class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): def __init__(self, connection): super().__init__(connection) self.idl._session.reconnect.set_probe_interval( config.get_ovn_ovsdb_probe_interval()) @property def nb_global(self): return next(iter(self.tables['NB_Global'].rows.values())) def create_transaction(self, check_error=False, log_errors=True, bump_nb_cfg=False): return OvnNbTransaction( self, self.ovsdb_connection, self.ovsdb_connection.timeout, check_error, log_errors, bump_nb_cfg=bump_nb_cfg) @contextlib.contextmanager def transaction(self, *args, **kwargs): """A 
wrapper on the ovsdbapp transaction to work with revisions. This method is just a wrapper around the ovsdbapp transaction to handle revision conflicts correctly. """ try: with super().transaction(*args, **kwargs) as t: yield t except ovn_exc.RevisionConflict as e: LOG.info('Transaction aborted. Reason: %s', e) def find_lb_in_table(self, lb, table): return FindLbInTableCommand(self, lb, table) def get_lrs(self): return GetLrsCommand(self) # NOTE(froyo): remove this method once ovsdbapp manages the IPv6 into [ ] def lb_del_ip_port_mapping(self, lb_uuid, backend_ip): return DelBackendFromIPPortMapping(self, lb_uuid, backend_ip) # NOTE(froyo): remove this method once ovsdbapp manages the IPv6 into [ ] def lb_add_ip_port_mapping(self, lb_uuid, backend_ip, port_name, src_ip): return AddBackendToIPPortMapping(self, lb_uuid, backend_ip, port_name, src_ip) class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend): def __init__(self, connection): super().__init__(connection) self.idl._session.reconnect.set_probe_interval( config.get_ovn_ovsdb_probe_interval()) class OvnNbIdlForLb(ovsdb_monitor.OvnIdl): SCHEMA = "OVN_Northbound" TABLES = ('Logical_Switch', 'Load_Balancer', 'Load_Balancer_Health_Check', 'Logical_Router', 'Logical_Switch_Port', 'Logical_Router_Port', 'Gateway_Chassis', 'NAT', 'HA_Chassis_Group') def __init__(self, event_lock_name=None, notifier=True): self.conn_string = config.get_ovn_nb_connection() ovsdb_monitor.check_and_set_ssl_files(self.SCHEMA) helper = self._get_ovsdb_helper(self.conn_string) for table in OvnNbIdlForLb.TABLES: helper.register_table(table) super().__init__( driver=None, remote=self.conn_string, schema=helper, notifier=notifier) self.event_lock_name = event_lock_name if self.event_lock_name: self.set_lock(self.event_lock_name) @utils.retry() def _get_ovsdb_helper(self, connection_string): return idlutils.get_schema_helper(connection_string, self.SCHEMA) class OvnSbIdlForLb(ovsdb_monitor.OvnIdl): SCHEMA = "OVN_Southbound" TABLES = ('Load_Balancer', 'Service_Monitor') def __init__(self, event_lock_name=None): self.conn_string = config.get_ovn_sb_connection() ovsdb_monitor.check_and_set_ssl_files(self.SCHEMA) helper = self._get_ovsdb_helper(self.conn_string) for table in OvnSbIdlForLb.TABLES: helper.register_table(table) super().__init__( driver=None, remote=self.conn_string, schema=helper) self.event_lock_name = event_lock_name if self.event_lock_name: self.set_lock(self.event_lock_name) atexit.register(self.stop) @utils.retry() def _get_ovsdb_helper(self, connection_string): return idlutils.get_schema_helper(connection_string, self.SCHEMA) def start(self): self.conn = connection.Connection( self, timeout=config.get_ovn_ovsdb_timeout()) return OvsdbSbOvnIdl(self.conn) def stop(self): # Close the running connection if it has been initalized if hasattr(self, 'conn'): if not self.conn.stop(timeout=config.get_ovn_ovsdb_timeout()): LOG.debug("Connection terminated to OvnSb " "but a thread is still alive") del self.conn # complete the shutdown for the event handler self.notify_handler.shutdown() # Close the idl session self.close() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/ovsdb/ovsdb_monitor.py0000664000175100017510000001040415033037524026573 0ustar00mylesmyles# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import os from oslo_config import cfg from oslo_log import log from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp import event from ovn_octavia_provider.common import config as ovn_config CONF = cfg.CONF LOG = log.getLogger(__name__) class BaseOvnIdl(connection.OvsdbIdl): @classmethod def from_server(cls, connection_string, schema_name): check_and_set_ssl_files(schema_name) helper = idlutils.get_schema_helper(connection_string, schema_name) helper.register_all() return cls(connection_string, helper) class OvnIdl(BaseOvnIdl): def __init__(self, driver, remote, schema, notifier=True): super().__init__(remote, schema) self.driver = driver if notifier: self.notify_handler = OvnDbNotifyHandler(driver) else: self.notify_handler = None # ovsdb lock name to acquire. # This event lock is used to handle the notify events sent by idl.Idl # idl.Idl will call notify function for the "update" rpc method it # receives from the ovsdb-server. # This event lock is required for the following reasons # - If there are multiple neutron servers running, OvnWorkers of # these neutron servers would receive the notify events from # idl.Idl # # - we do not want all the neutron servers to handle these events # # - only the neutron server which has the lock will handle the # notify events. # # - In case the neutron server which owns this lock goes down, # ovsdb server would assign the lock to one of the other neutron # servers. self.event_lock_name = "ovn_provider_driver_event_lock" def notify(self, event, row, updates=None): # Do not handle the notification if the event lock is requested, # but not granted by the ovsdb-server. 
if self.is_lock_contended: return if not self.notify_handler: return row = idlutils.frozen_row(row) self.notify_handler.notify(event, row, updates) @abc.abstractmethod def post_connect(self): """Should be called after the idl has been initialized""" class OvnDbNotifyHandler(event.RowEventHandler): def __init__(self, driver): super().__init__() self.driver = driver def check_and_set_ssl_files(schema_name): if schema_name in ['OVN_Northbound', 'OVN_Southbound']: if schema_name == 'OVN_Northbound': priv_key_file = ovn_config.get_ovn_nb_private_key() cert_file = ovn_config.get_ovn_nb_certificate() ca_cert_file = ovn_config.get_ovn_nb_ca_cert() else: priv_key_file = ovn_config.get_ovn_sb_private_key() cert_file = ovn_config.get_ovn_sb_certificate() ca_cert_file = ovn_config.get_ovn_sb_ca_cert() if priv_key_file: if not os.path.exists(priv_key_file): LOG.error(f"Cannot find private key file for {schema_name}") else: Stream.ssl_set_private_key_file(priv_key_file) if cert_file: if not os.path.exists(cert_file): LOG.error(f"Cannot find private cert file for {schema_name}") else: Stream.ssl_set_certificate_file(cert_file) if ca_cert_file: if not os.path.exists(ca_cert_file): LOG.error(f"Cannot find ca cert file for {schema_name}") else: Stream.ssl_set_ca_cert_file(ca_cert_file) else: LOG.error(f"{schema_name} not supported") ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5159845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/0000775000175100017510000000000015033037526023365 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/__init__.py0000664000175100017510000000000015033037524025462 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5159845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/0000775000175100017510000000000015033037526025527 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/__init__.py0000664000175100017510000000000015033037524027624 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/base.py0000664000175100017510000016012715033037524027020 0ustar00mylesmyles# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import time from unittest import mock from neutron.common import utils as n_utils from neutron_lib import constants as n_const from neutron_lib.plugins import directory from octavia_lib.api.drivers import data_models as octavia_data_model from octavia_lib.api.drivers import driver_lib from octavia_lib.common import constants as o_constants from oslo_db import exception as odb_exc from oslo_serialization import jsonutils from oslo_utils import uuidutils from ovsdbapp.schema.ovn_northbound import impl_idl as nb_idl_ovn from ovsdbapp.schema.ovn_southbound import impl_idl as sb_idl_ovn import tenacity # NOTE(mjozefcz): We need base neutron functionals because we need # mechanism driver and l3 plugin. from neutron.tests.functional import base from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import driver as ovn_driver class TestOvnOctaviaBase(base.TestOVNFunctionalBase, base.BaseLoggingTestCase): def setUp(self): super().setUp() nb_idl_ovn.OvnNbApiIdlImpl.ovsdb_connection = None sb_idl_ovn.OvnSbApiIdlImpl.ovsdb_connection = None # TODO(mjozefcz): Use octavia listeners to provide needed # sockets and modify tests in order to verify if fake # listener (status) has received valid value. try: mock.patch.object( driver_lib.DriverLibrary, '_check_for_socket_ready').start() except AttributeError: # Backward compatiblity with octavia-lib < 1.3.1 pass self.ovn_driver = ovn_driver.OvnProviderDriver() self.ovn_driver._ovn_helper._octavia_driver_lib = mock.MagicMock() self._o_driver_lib = self.ovn_driver._ovn_helper._octavia_driver_lib self._o_driver_lib.update_loadbalancer_status = mock.Mock() self.fake_neutron_client = mock.MagicMock() clients.get_neutron_client = mock.MagicMock() clients.get_neutron_client.return_value = self.fake_neutron_client self.fake_neutron_client.get_network = self._mock_get_network self.fake_neutron_client.get_subnet = self._mock_get_subnet self.fake_neutron_client.ports = self._mock_ports self.fake_neutron_client.get_port = self._mock_get_port self.fake_neutron_client.delete_port.return_value = True self._local_net_cache = {} self._local_cidr_cache = {} self._local_port_cache = {'ports': []} self.core_plugin = directory.get_plugin() def _port_dict_to_mock(self, port_dict): port = mock.Mock(**port_dict) return port def _mock_get_network(self, network_id): network = mock.Mock() network.id = network_id network.provider_physical_network = None return network def _mock_get_subnet(self, subnet_id): subnet = mock.Mock() subnet.network_id = self._local_net_cache[subnet_id] subnet.cidr = self._local_cidr_cache[subnet_id] subnet.gateway_ip = None return subnet def _mock_ports(self, **kwargs): return self._local_port_cache['ports'] def _mock_get_port(self, port_id): for port in self._local_port_cache['ports']: if port.id == port_id: return port def _create_provider_network(self): e1 = self._make_network(self.fmt, 'e1', True, True, arg_list=('router:external', 'provider:network_type', 'provider:physical_network'), **{'router:external': True, 'provider:network_type': 'flat', 'provider:physical_network': 'public'}) res = self._create_subnet(self.fmt, e1['network']['id'], '100.0.0.0/24', gateway_ip='100.0.0.254', allocation_pools=[{'start': '100.0.0.2', 'end': '100.0.0.253'}], enable_dhcp=False) e1_s1 = self.deserialize(self.fmt, res) return e1, e1_s1 def _create_lb_model(self, vip=None, vip_network_id=None, vip_subnet_id=None, vip_port_id=None, admin_state_up=True, additional_vips=[]): lb = 
octavia_data_model.LoadBalancer() lb.loadbalancer_id = uuidutils.generate_uuid() if vip: lb.vip_address = vip else: lb.vip_address = '10.0.0.4' if vip_network_id: lb.vip_network_id = vip_network_id if vip_subnet_id: lb.vip_subnet_id = vip_subnet_id if vip_port_id: lb.vip_port_id = vip_port_id if additional_vips: lb.additional_vips = additional_vips lb.admin_state_up = admin_state_up return lb def _create_pool_model( self, loadbalancer_id, pool_name, protocol=o_constants.PROTOCOL_TCP, lb_algorithm=o_constants.LB_ALGORITHM_SOURCE_IP_PORT, admin_state_up=True, listener_id=None): m_pool = octavia_data_model.Pool() if protocol: m_pool.protocol = protocol else: m_pool.protocol = o_constants.PROTOCOL_TCP m_pool.name = pool_name m_pool.pool_id = uuidutils.generate_uuid() m_pool.loadbalancer_id = loadbalancer_id m_pool.members = [] m_pool.admin_state_up = admin_state_up m_pool.lb_algorithm = lb_algorithm if listener_id: m_pool.listener_id = listener_id return m_pool def _create_member_model(self, pool_id, subnet_id, address, protocol_port=None, admin_state_up=True): m_member = octavia_data_model.Member() if protocol_port: m_member.protocol_port = protocol_port else: m_member.protocol_port = 80 m_member.member_id = uuidutils.generate_uuid() m_member.pool_id = pool_id if subnet_id: m_member.subnet_id = subnet_id m_member.address = address m_member.admin_state_up = admin_state_up return m_member def _create_hm_model(self, pool_id, name, delay, timeout, max_retries, hm_type, max_retries_down=3, admin_state_up=True): return octavia_data_model.HealthMonitor( admin_state_up=admin_state_up, delay=delay, max_retries=max_retries, max_retries_down=max_retries_down, healthmonitor_id=uuidutils.generate_uuid(), name=name, pool_id=pool_id, type=hm_type, timeout=timeout) def _create_listener_model(self, loadbalancer_id, pool_id=None, protocol_port=80, protocol=None, admin_state_up=True): m_listener = octavia_data_model.Listener() if protocol: m_listener.protocol = protocol else: m_listener.protocol = o_constants.PROTOCOL_TCP m_listener.listener_id = uuidutils.generate_uuid() m_listener.loadbalancer_id = loadbalancer_id if pool_id: m_listener.default_pool_id = pool_id m_listener.protocol_port = protocol_port m_listener.admin_state_up = admin_state_up return m_listener def _get_loadbalancers(self): lbs = [] for lb in self.nb_api.tables['Load_Balancer'].rows.values(): external_ids = dict(lb.external_ids) # Skip load balancers used by port forwarding plugin if external_ids.get(ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) == ( ovn_const.PORT_FORWARDING_PLUGIN): continue ls_refs = external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) if ls_refs: external_ids[ ovn_const.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads( ls_refs) member_status = external_ids.get(ovn_const.OVN_MEMBER_STATUS_KEY) if member_status: external_ids[ ovn_const.OVN_MEMBER_STATUS_KEY] = jsonutils.loads( member_status) lb_dict = {'name': lb.name, 'protocol': lb.protocol, 'vips': lb.vips, 'external_ids': external_ids} try: lb_dict['selection_fields'] = lb.selection_fields except AttributeError: pass lbs.append(lb_dict) return lbs def _get_loadbalancer_id(self, lb_name): for lb in self.nb_api.tables['Load_Balancer'].rows.values(): if lb.name == lb_name: return lb.uuid def _validate_loadbalancers(self, expected_lbs): observed_lbs = self._get_loadbalancers() # NOTE (mjozefcz): assertCountEqual works only on first level # of comparison, if dicts inside dicts are in different # order it would fail. 
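# Hence the manual matching below: pair each expected LB with the observed
# entry sharing the same name and protocol, then compare the full dicts.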
self.assertEqual(len(expected_lbs), len(observed_lbs)) for expected_lb in expected_lbs: # search for LB with same name and protocol found = False for observed_lb in observed_lbs: if (observed_lb.get('name') == expected_lb.get('name') and observed_lb.get('protocol') == expected_lb.get('protocol')): self.assertEqual(expected_lb, observed_lb) found = True if not found: raise Exception("Expected LB %s for protocol %s " "not found in observed_lbs" % ( expected_lb.get('name'), expected_lb.get('proto'))) def _is_lb_associated_to_ls(self, lb_name, ls_name): return self._is_lb_associated_to_tab( 'Logical_Switch', lb_name, ls_name) def _is_lb_associated_to_lr(self, lb_name, lr_name): return self._is_lb_associated_to_tab( 'Logical_Router', lb_name, lr_name) def _is_lb_associated_to_tab(self, table, lb_name, ls_name): lb_uuid = self._get_loadbalancer_id(lb_name) for ls in self.nb_api.tables[table].rows.values(): if ls.name == ls_name: ls_lbs = [lb.uuid for lb in ls.load_balancer] return lb_uuid in ls_lbs return False @tenacity.retry( retry=tenacity.retry_if_exception_type(odb_exc.DBError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _create_router(self, name, gw_info=None): router = {'router': {'name': name, 'admin_state_up': True, 'tenant_id': self._tenant_id}} if gw_info: router['router']['external_gateway_info'] = gw_info router = self.l3_plugin.create_router(self.context, router) return router['id'] @tenacity.retry( retry=tenacity.retry_if_exception_type(odb_exc.DBError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _create_net(self, name): n1 = self._make_network(self.fmt, name, True) return n1 @tenacity.retry( retry=tenacity.retry_if_exception_type(odb_exc.DBError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _create_subnet_from_net(self, net, cidr, ip_version=n_const.IP_VERSION_4): res = self._create_subnet(self.fmt, net['network']['id'], cidr, ip_version=ip_version) subnet = self.deserialize(self.fmt, res)['subnet'] self._local_net_cache[subnet['id']] = net['network']['id'] self._local_cidr_cache[subnet['id']] = subnet['cidr'] return net['network']['id'], subnet['id'] @tenacity.retry( retry=tenacity.retry_if_exception_type(odb_exc.DBError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _attach_router_to_subnet(self, subnet_id, router_id): self.l3_plugin.add_router_interface( self.context, router_id, {'subnet_id': subnet_id}) @tenacity.retry( retry=tenacity.retry_if_exception_type(odb_exc.DBError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_attempt(3), reraise=True) def _create_port_on_network(self, net): port = self._make_port(self.fmt, net['network']['id']) self._local_port_cache['ports'].append( self._port_dict_to_mock(port['port'])) port_address = port['port']['fixed_ips'][0]['ip_address'] return (port_address, port['port']['id']) def _update_ls_refs(self, lb_data, net_id, add_ref=True): if not net_id.startswith(ovn_const.LR_REF_KEY_HEADER): net_id = ovn_const.LR_REF_KEY_HEADER + '%s' % net_id if add_ref: if net_id not in lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY]: lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1 else: lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] += 1 else: ref_ct = lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] if ref_ct <= 0: del lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] def _wait_for_status(self, expected_statuses, check_call=True): call_count = 
len(expected_statuses) update_loadbalancer_status = ( self._o_driver_lib.update_loadbalancer_status) n_utils.wait_until_true( lambda: update_loadbalancer_status.call_count == call_count, timeout=10) if check_call: # NOTE(mjozefcz): The updates are sent in parallel and include # dicts with unordered lists inside. So we can't simply use # assert_has_calls here. Sample structure: # {'listeners': [], # 'loadbalancers': [{'id': 'a', 'provisioning_status': 'ACTIVE'}], # 'members': [{'id': 'b', 'provisioning_status': 'DELETED'}, # {'id': 'c', 'provisioning_status': 'DELETED'}], # 'pools': [{'id': 'd', 'operating_status': 'ONLINE', # 'provisioning_status': 'ACTIVE'}]}, updated_statuses = [] for call in update_loadbalancer_status.mock_calls: updated_statuses.append(call[1][0]) calls_found = [] for expected_status in expected_statuses: for updated_status in updated_statuses: # Find status update having equal keys if (sorted(updated_status.keys()) == sorted(expected_status.keys())): val_check = [] # Within this status update check if all values of # the expected keys match. for k, v in expected_status.items(): ex = sorted(expected_status[k], key=lambda x: x['id']) ox = sorted(updated_status[k], key=lambda x: x['id']) val_check.append(all(item in ox for item in ex)) if False in val_check: # At least one value doesn't match. continue calls_found.append(expected_status) break # Validate if we found all expected calls. self.assertCountEqual(expected_statuses, calls_found) def _wait_for_status_and_validate(self, lb_data, expected_status, check_call=True): self._wait_for_status(expected_status, check_call) expected_lbs = self._make_expected_lbs(lb_data) self._validate_loadbalancers(expected_lbs) def _create_load_balancer_custom_lr_ls_and_validate( self, admin_state_up=True, create_router=True, force_retry_ls_to_lr_assoc=True): self._o_driver_lib.update_loadbalancer_status.reset_mock() net_info = [] net1 = self._create_net('n' + uuidutils.generate_uuid()[:4]) network_id1, subnet_id1 = self._create_subnet_from_net( net1, '10.0.1.0/24') port_address1, port_id1 = self._create_port_on_network(net1) net_info.append((network_id1, subnet_id1, port_address1, port_id1)) net2 = self._create_net('n' + uuidutils.generate_uuid()[:4]) network_id2, subnet_id2 = self._create_subnet_from_net( net2, '10.0.2.0/24') port_address2, port_id2 = self._create_port_on_network(net2) net_info.append((network_id2, subnet_id2, port_address2, port_id2)) net3 = self._create_net('n' + uuidutils.generate_uuid()[:4]) network_id3, subnet_id3 = self._create_subnet_from_net( net3, '10.0.3.0/24') port_address3, port_id3 = self._create_port_on_network(net3) net_info.append((network_id3, subnet_id3, port_address3, port_id3)) r_id = self._create_router( 'r' + uuidutils.generate_uuid()[:4]) if create_router else None self._attach_router_to_subnet(subnet_id1, r_id) self._attach_router_to_subnet(subnet_id2, r_id) self._attach_router_to_subnet(subnet_id3, r_id) lb_data = {} lb_data['model'] = self._create_lb_model( vip=net_info[0][2], vip_network_id=net_info[0][0], vip_subnet_id=net_info[0][1], vip_port_id=net_info[0][3], admin_state_up=admin_state_up) lb_data[ovn_const.LB_EXT_IDS_LR_REF_KEY] = \ (ovn_const.LR_REF_KEY_HEADER + r_id) lb_data['vip_net_info'] = net_info[0] lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = {} lb_data['listeners'] = [] lb_data['pools'] = [] self._update_ls_refs(lb_data, net_info[0][0]) ls = [ovn_const.LR_REF_KEY_HEADER + net[0] for net in net_info] if force_retry_ls_to_lr_assoc: ls_foo = copy.deepcopy(ls) ls_foo.append('neutron-foo') 
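# Mock the helper's _find_ls_for_lr so the first call returns a list # containing a non-existent switch ('neutron-foo') and the second the real # list, forcing the LS-to-LR association logic to retry.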
self.ovn_driver._ovn_helper._find_ls_for_lr = mock.MagicMock() self.ovn_driver._ovn_helper._find_ls_for_lr.side_effect = \ [ls_foo, ls] self.ovn_driver.loadbalancer_create(lb_data['model']) # NOTE(froyo): This sleep is needed because the previous call also # modifies the LB VIP port, in the same way as the following # update_port call does. Without it the second update was not propagated. time.sleep(1) name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, lb_data['model'].loadbalancer_id) self.driver.update_port( self.context, net_info[0][3], {'port': {'name': name}}) if lb_data['model'].admin_state_up: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.ONLINE}] } else: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.OFFLINE}] } self._wait_for_status_and_validate(lb_data, [expected_status]) self.assertTrue( self._is_lb_associated_to_ls( lb_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + net_info[0][0])) # NOTE(froyo): Just to check that all nets connected to the lr have # a reference to the lb for net_id in ls: self.assertTrue( self._is_lb_associated_to_ls( lb_data['model'].loadbalancer_id, net_id)) return lb_data def _create_load_balancer_and_validate(self, network_id, subnet_id, port_address, port_id, admin_state_up=True, only_model=False, router_id=None, multiple_lb=False, additional_vips=[]): self._o_driver_lib.update_loadbalancer_status.reset_mock() lb_data = {} if router_id: lb_data[ovn_const.LB_EXT_IDS_LR_REF_KEY] = ( ovn_const.LR_REF_KEY_HEADER + router_id) lb_data['vip_net_info'] = ( network_id, subnet_id, port_address, port_id) lb_data['model'] = self._create_lb_model( vip=port_address, vip_network_id=network_id, vip_subnet_id=subnet_id, vip_port_id=port_id, admin_state_up=admin_state_up, additional_vips=additional_vips) lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = {} lb_data['listeners'] = [] lb_data['pools'] = [] self._update_ls_refs(lb_data, network_id) if only_model: return lb_data self.ovn_driver.loadbalancer_create(lb_data['model']) # NOTE(froyo): This sleep is needed because the previous call also # modifies the LB VIP port, in the same way as the following # update_port call does. Without it the second update was not propagated. 
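# Rename the pre-created VIP port to LB_VIP_PORT_PREFIX + loadbalancer_id so # it matches the naming the provider uses for LB VIP ports.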
time.sleep(1) name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, lb_data['model'].loadbalancer_id) self.driver.update_port( self.context, port_id, {'port': {'name': name}}) if additional_vips: for index, add_vip in enumerate(additional_vips, start=1): name = '%s%s-%s' % ( ovn_const.LB_VIP_ADDIT_PORT_PREFIX, index, lb_data['model'].loadbalancer_id) self.driver.update_port( self.context, add_vip['port_id'], {'port': {'name': name}}) if lb_data['model'].admin_state_up: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.ONLINE}] } else: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.OFFLINE}] } if not multiple_lb: self._wait_for_status_and_validate(lb_data, [expected_status]) else: l_id = lb_data['model'].loadbalancer_id self._wait_for_status([expected_status]) self.assertIn(l_id, [lb['name'] for lb in self._get_loadbalancers()]) self.assertTrue( self._is_lb_associated_to_ls( lb_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + network_id)) return lb_data def _update_load_balancer_and_validate(self, lb_data, admin_state_up=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() if admin_state_up is not None: lb_data['model'].admin_state_up = admin_state_up self.ovn_driver.loadbalancer_update( lb_data['model'], lb_data['model']) if lb_data['model'].admin_state_up: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.ONLINE}] } else: expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.OFFLINE}] } self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_load_balancer_and_validate(self, lb_data, cascade=False, multiple_lb=False): self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.loadbalancer_delete(lb_data['model'], cascade) expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}] } if cascade: expected_status['pools'] = [] expected_status['members'] = [] expected_status['listeners'] = [] for pool in lb_data['pools']: expected_status['pools'].append({ 'id': pool.pool_id, 'provisioning_status': 'DELETED'}) for member in pool.members: expected_status['members'].append({ "id": member.member_id, "provisioning_status": "DELETED"}) for listener in lb_data['listeners']: expected_status['listeners'].append({ "id": listener.listener_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}) expected_status = { key: value for key, value in expected_status.items() if value} l_id = lb_data['model'].loadbalancer_id lb = lb_data['model'] del lb_data['model'] if not multiple_lb: self._wait_for_status_and_validate(lb_data, [expected_status]) else: self._wait_for_status([expected_status]) self.assertNotIn( l_id, [lbs['name'] for lbs in self._get_loadbalancers()]) vip_net_id = lb_data['vip_net_info'][0] self.assertFalse( self._is_lb_associated_to_ls( lb.loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + vip_net_id)) def _make_expected_lbs(self, lb_data): def _get_lb_field_by_protocol(protocol, field='external_ids'): "Get the needed field of the matching expected LB, by reference" lb = [lb for lb in expected_lbs if lb.get('protocol') == [protocol]] return lb[0].get(field) if not lb_data or not 
lb_data.get('model'): return [] vip_net_info = lb_data['vip_net_info'] external_ids = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: {}, 'neutron:vip': lb_data['model'].vip_address, 'neutron:vip_port_id': vip_net_info[3], 'enabled': str(lb_data['model'].admin_state_up)} # if there are any additional_vip on model if lb_data['model'].additional_vips: external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = '' external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY] = '' for addi_vip in lb_data['model'].additional_vips: external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] += \ addi_vip['ip_address'] + ',' external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY] += \ addi_vip['port_id'] + ',' external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = \ external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY][:-1] external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY] = \ external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY][:-1] # NOTE(mjozefcz): By default we don't set protocol. We don't know if # listener/pool would be TCP, UDP or SCTP, so do not set it. expected_protocols = set() # Lets fetch list of L4 protocols defined for this LB. for p in lb_data['pools']: expected_protocols.add(p.protocol.lower()) for listener in lb_data['listeners']: expected_protocols.add(listener.protocol.lower()) # If there is no protocol lets add default - empty []. expected_protocols = list(expected_protocols) if len(expected_protocols) == 0: expected_protocols.append(None) expected_lbs = [] for protocol in expected_protocols: lb = {'name': lb_data['model'].loadbalancer_id, 'protocol': [protocol] if protocol else [], 'vips': {}, 'external_ids': copy.deepcopy(external_ids)} if self.ovn_driver._ovn_helper._are_selection_fields_supported(): lb['selection_fields'] = ovn_const.LB_SELECTION_FIELDS_MAP[ o_constants.LB_ALGORITHM_SOURCE_IP_PORT] expected_lbs.append(lb) # For every connected subnet to the LB set the ref # counter. for net_id, ref_ct in lb_data[ ovn_const.LB_EXT_IDS_LS_REFS_KEY].items(): for lb in expected_lbs: # If given LB hasn't VIP configured from # this network we shouldn't touch it here. if net_id == 'neutron-%s' % lb_data['model'].vip_network_id: lb.get('external_ids')[ ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1 if lb_data['model'].additional_vips: lb.get('external_ids')[ ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] += \ len(lb_data['model'].additional_vips) # For every connected router set it here. if lb_data.get(ovn_const.LB_EXT_IDS_LR_REF_KEY): for lb in expected_lbs: lb.get('external_ids')[ ovn_const.LB_EXT_IDS_LR_REF_KEY] = lb_data[ ovn_const.LB_EXT_IDS_LR_REF_KEY] pool_info = {} for p in lb_data.get('pools', []): member_status = {} external_ids = _get_lb_field_by_protocol( p.protocol.lower(), field='external_ids') p_members = "" for m in p.members: m_info = 'member_' + m.member_id + '_' + m.address m_info += ":" + str(m.protocol_port) m_info += "_" + str(m.subnet_id) if p_members: p_members += "," + m_info else: p_members = m_info # Bump up LS refs counter if needed. if m.subnet_id: found = False # Need to get the network_id. 
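# Walk the locally cached ports to map the member's subnet to its network # and bump that network's entry in the expected LS refs counter.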
for port in self._local_port_cache['ports']: if not found: for fixed_ip in port.fixed_ips: if fixed_ip['subnet_id'] == m.subnet_id: ex = external_ids[ ovn_const.LB_EXT_IDS_LS_REFS_KEY] act = ex.get( 'neutron-%s' % port.network_id, 0) ex['neutron-%s' % port.network_id] = \ act + 1 found = True if p.healthmonitor: member_status[m.member_id] = o_constants.ONLINE external_ids[ovn_const.LB_EXT_IDS_HMS_KEY] = \ jsonutils.dumps([p.healthmonitor.healthmonitor_id]) else: if m.admin_state_up: member_status[m.member_id] = o_constants.NO_MONITOR else: member_status[m.member_id] = o_constants.OFFLINE pool_key = 'pool_' + p.pool_id if not p.admin_state_up: pool_key += ':D' external_ids[pool_key] = p_members pool_info[p.pool_id] = p_members if member_status: external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = member_status for listener in lb_data['listeners']: expected_vips = _get_lb_field_by_protocol( listener.protocol.lower(), field='vips') external_ids = _get_lb_field_by_protocol( listener.protocol.lower(), field='external_ids') listener_k = 'listener_' + str(listener.listener_id) if lb_data['model'].admin_state_up and listener.admin_state_up: vips_k = [lb_data['model'].vip_address + ":" + str( listener.protocol_port)] # idem for additional vips if exists if lb_data['model'].additional_vips: for addi_vip in lb_data['model'].additional_vips: vips_k.append(addi_vip['ip_address'] + ":" + str( listener.protocol_port)) if not isinstance(listener.default_pool_id, octavia_data_model.UnsetType) and pool_info[ listener.default_pool_id]: for vip_k in vips_k: expected_vips[vip_k] = self._extract_member_info( pool_info[listener.default_pool_id]) else: listener_k += ':D' external_ids[listener_k] = str(listener.protocol_port) + ":" if not isinstance(listener.default_pool_id, octavia_data_model.UnsetType): external_ids[listener_k] += 'pool_' + listener.default_pool_id elif lb_data.get('pools', []): external_ids[listener_k] += 'pool_' + lb_data[ 'pools'][0].pool_id return expected_lbs def _extract_member_info(self, member): mem_info = '' if member: for item in member.split(','): mem_info += item.split('_')[2] + "," return mem_info[:-1] def _create_pool_and_validate(self, lb_data, pool_name, protocol=None, listener_id=None): lb_pools = lb_data['pools'] m_pool = self._create_pool_model(lb_data['model'].loadbalancer_id, pool_name, protocol=protocol, listener_id=listener_id) lb_pools.append(m_pool) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.pool_create(m_pool) operating_status = ( o_constants.ONLINE if listener_id else o_constants.OFFLINE) expected_status = { 'pools': [{'id': m_pool.pool_id, 'provisioning_status': 'ACTIVE', 'operating_status': operating_status}], 'loadbalancers': [{'id': m_pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}] } if listener_id: expected_status['listeners'] = [ {'id': listener_id, 'provisioning_status': 'ACTIVE'}] self._wait_for_status_and_validate(lb_data, [expected_status]) expected_lbs = self._make_expected_lbs(lb_data) self._validate_loadbalancers(expected_lbs) def _update_pool_and_validate(self, lb_data, pool_name, admin_state_up=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() m_pool = self._get_pool_from_lb_data(lb_data, pool_name=pool_name) old_admin_state_up = m_pool.admin_state_up operating_status = 'ONLINE' if admin_state_up is not None: m_pool.admin_state_up = admin_state_up if not admin_state_up: operating_status = 'OFFLINE' pool_listeners = self._get_pool_listeners(lb_data, m_pool.pool_id) expected_listener_status = [ {'id': 
listener.listener_id, 'provisioning_status': 'ACTIVE'} for listener in pool_listeners] self.ovn_driver.pool_update(m_pool, m_pool) expected_status = { 'pools': [{'id': m_pool.pool_id, 'provisioning_status': 'ACTIVE', 'operating_status': operating_status}], 'loadbalancers': [{'id': m_pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': expected_listener_status } if old_admin_state_up != m_pool.admin_state_up: if m_pool.admin_state_up: oper_status = o_constants.ONLINE else: oper_status = o_constants.OFFLINE expected_status['pools'][0]['operating_status'] = oper_status self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_pool_and_validate(self, lb_data, pool_name, listener_id=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() p = self._get_pool_from_lb_data(lb_data, pool_name=pool_name) self.ovn_driver.pool_delete(p) lb_data['pools'].remove(p) expected_status = [] # When a pool is deleted and if it has a health_monitor associated or # any members, there are expected to be deleted. if p.healthmonitor: member_statuses = [{"id": m.member_id, "provisioning_status": o_constants.ACTIVE, "operating_status": o_constants.NO_MONITOR} for m in p.members] expected_status.append( {'healthmonitors': [{ "id": p.healthmonitor.healthmonitor_id, "provisioning_status": o_constants.DELETED, "operating_status": o_constants.NO_MONITOR}], 'members': member_statuses, 'loadbalancers': [{ "id": p.loadbalancer_id, "provisioning_status": o_constants.ACTIVE}], 'pools': [{"id": p.pool_id, "provisioning_status": o_constants.ACTIVE}], 'listeners': []}) for m in p.members: expected_status.append( {'pools': [{"id": p.pool_id, "provisioning_status": o_constants.ACTIVE, "operating_status": o_constants.ONLINE}], 'members': [{"id": m.member_id, "provisioning_status": o_constants.DELETED}], 'loadbalancers': [{ "id": p.loadbalancer_id, "provisioning_status": o_constants.ACTIVE, 'operating_status': o_constants.ONLINE}], 'listeners': []}) self._update_ls_refs( lb_data, self._local_net_cache[m.subnet_id], add_ref=False) if p.members: # If Pool has members, delete all members of the pool. 
When the # last member is processed set Operating status of Pool as Offline expected_status[-1]['pools'][0][ 'operating_status'] = o_constants.OFFLINE pool_dict = { 'pools': [{'id': p.pool_id, 'provisioning_status': 'DELETED'}], 'loadbalancers': [{'id': p.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': [] } if listener_id: pool_dict['listeners'] = [{'id': listener_id, 'provisioning_status': 'ACTIVE'}] expected_status.append(pool_dict) self._wait_for_status_and_validate(lb_data, expected_status) def _get_pool_from_lb_data(self, lb_data, pool_id=None, pool_name=None): for p in lb_data['pools']: if pool_id and p.pool_id == pool_id: return p if pool_name and p.name == pool_name: return p def _get_listener_from_lb_data(self, lb_data, protocol, protocol_port): for listener in lb_data['listeners']: if (listener.protocol_port == protocol_port and listener.protocol == protocol): return listener def _get_pool_listeners(self, lb_data, pool_id): listeners = [] for listener in lb_data['listeners']: if listener.default_pool_id == pool_id: listeners.append(listener) return listeners def _create_member_and_validate(self, lb_data, pool_id, subnet_id, network_id, address, expected_subnet=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) pool_status = {'id': pool.pool_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.ONLINE} m_member = self._create_member_model(pool.pool_id, subnet_id, address) # The "expected" member value, which might be different from what # we pass to member_create(), for example, if an expected_subnet # was given. if expected_subnet: e_member = copy.deepcopy(m_member) e_member.subnet_id = expected_subnet else: e_member = m_member pool.members.append(e_member) self.ovn_driver.member_create(m_member) self._update_ls_refs(lb_data, network_id) pool_listeners = self._get_pool_listeners(lb_data, pool_id) expected_listener_status = [ {'id': listener.listener_id, 'provisioning_status': 'ACTIVE', 'operating_status': o_constants.ONLINE} for listener in pool_listeners] expected_status = { 'pools': [pool_status], 'members': [{"id": m_member.member_id, "provisioning_status": "ACTIVE", "operating_status": o_constants.NO_MONITOR}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE', 'operating_status': o_constants.ONLINE}], 'listeners': expected_listener_status } self._wait_for_status_and_validate(lb_data, [expected_status]) def _get_pool_member(self, pool, member_address): for m in pool.members: if m.address == member_address: return m def _update_member_and_validate(self, lb_data, pool_id, member_address, remove_subnet_id=False, admin_state_up=True): pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) member = self._get_pool_member(pool, member_address) self._o_driver_lib.update_loadbalancer_status.reset_mock() old_member = copy.deepcopy(member) member.admin_state_up = admin_state_up # NOTE(froyo): In order to test update of member without passing the # subnet_id parameter of the member, just to cover the case when a new # member has been created without passing that argument if remove_subnet_id: old_member.subnet_id = None self.ovn_driver.member_update(old_member, member) expected_status = { 'pools': [{'id': pool.pool_id, 'provisioning_status': 'ACTIVE', 'operating_status': o_constants.ONLINE}], 'members': [{"id": member.member_id, 'provisioning_status': 'ACTIVE'}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 
'ACTIVE', 'operating_status': o_constants.ONLINE}], 'listeners': [] } if getattr(member, 'admin_state_up', None): expected_status['members'][0][ 'operating_status'] = o_constants.NO_MONITOR else: expected_status['members'][0]['operating_status'] = "OFFLINE" self._wait_for_status_and_validate(lb_data, [expected_status]) def _update_members_in_batch_and_validate(self, lb_data, pool_id, members): pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) expected_status = [] self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.member_batch_update(pool_id, members) for member in members: expected_status.append( {'pools': [{'id': pool.pool_id, 'provisioning_status': 'ACTIVE'}], 'members': [{'id': member.member_id, 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE', 'operating_status': o_constants.ONLINE}], 'listeners': []}) for m in pool.members: found = False for member in members: if member.member_id == m.member_id: found = True break if not found: expected_status.append( {'pools': [{'id': pool.pool_id, 'provisioning_status': 'ACTIVE'}], 'members': [{'id': m.member_id, 'provisioning_status': 'DELETED'}], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': 'ACTIVE'}], 'listeners': []}) # Delete member from lb_data pool.members.remove(m) self._wait_for_status_and_validate(lb_data, expected_status, check_call=False) def _delete_member_and_validate(self, lb_data, pool_id, network_id, member_address, remove_subnet_id=False): pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) member = self._get_pool_member(pool, member_address) pool.members.remove(member) pool_status = {"id": pool.pool_id, "provisioning_status": o_constants.ACTIVE, "operating_status": o_constants.ONLINE} if not pool.members: pool_status['operating_status'] = o_constants.OFFLINE self._o_driver_lib.update_loadbalancer_status.reset_mock() # NOTE(froyo): In order to test deletion of member without passing # the subnet_id parameter of the member, just to cover the case when # a new member has been created without passing that argument m_member = copy.deepcopy(member) if remove_subnet_id: m_member.subnet_id = None self.ovn_driver.member_delete(m_member) expected_status = { 'pools': [pool_status], 'members': [{"id": member.member_id, "provisioning_status": "DELETED"}], 'loadbalancers': [{"id": pool.loadbalancer_id, 'provisioning_status': 'ACTIVE', 'operating_status': o_constants.ONLINE}], 'listeners': []} self._update_ls_refs(lb_data, network_id, add_ref=False) self._wait_for_status_and_validate(lb_data, [expected_status]) def _create_hm_and_validate(self, lb_data, pool_id, name, delay, timeout, max_retries, hm_type): self._o_driver_lib.update_loadbalancer_status.reset_mock() pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) pool_status = {'id': pool.pool_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.ONLINE} m_hm = self._create_hm_model(pool.pool_id, name, delay, timeout, max_retries, hm_type) pool.healthmonitor = m_hm self.ovn_driver._ovn_helper._update_hm_member = mock.MagicMock() self.ovn_driver._ovn_helper._update_hm_member.side_effect = [ o_constants.ONLINE, o_constants.ONLINE] self.ovn_driver.health_monitor_create(m_hm) pool_listeners = self._get_pool_listeners(lb_data, pool_id) expected_listener_status = [ {'id': listener.listener_id, 'provisioning_status': o_constants.ACTIVE} for listener in pool_listeners] expected_member_status = [ {'id': 
m.member_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.ONLINE} for m in pool.members] expected_hm_status = {'id': m_hm.healthmonitor_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.ONLINE} expected_status = { 'pools': [pool_status], 'members': expected_member_status, 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.ONLINE}], 'listeners': expected_listener_status, 'healthmonitors': [expected_hm_status] } self._wait_for_status_and_validate(lb_data, [expected_status]) def _update_hm_and_validate(self, lb_data, pool_id, admin_state_up=None): self._o_driver_lib.update_loadbalancer_status.reset_mock() pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) hm = pool.healthmonitor old_hm = copy.deepcopy(hm) operating_status = o_constants.ONLINE if admin_state_up is not None: hm.admin_state_up = admin_state_up if not admin_state_up: operating_status = o_constants.OFFLINE pool_status = {"id": pool.pool_id, "provisioning_status": o_constants.ACTIVE} self.ovn_driver.health_monitor_update(old_hm, hm) expected_hm_status = {'id': hm.healthmonitor_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': operating_status} expected_status = { 'pools': [pool_status], 'loadbalancers': [{'id': pool.loadbalancer_id, 'provisioning_status': o_constants.ACTIVE}], 'healthmonitors': [expected_hm_status]} self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_hm_and_validate(self, lb_data, pool_id): self._o_driver_lib.update_loadbalancer_status.reset_mock() pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) hm = pool.healthmonitor pool.healthmonitor = None pool_status = {'id': pool.pool_id, 'provisioning_status': o_constants.ACTIVE} pool_listeners = self._get_pool_listeners(lb_data, pool_id) expected_listener_status = [ {'id': listener.listener_id, 'provisioning_status': o_constants.ACTIVE} for listener in pool_listeners] self.ovn_driver.health_monitor_delete(hm) expected_hm_status = {'id': hm.healthmonitor_id, 'provisioning_status': o_constants.DELETED, 'operating_status': o_constants.NO_MONITOR} expected_member_status = [ {'id': m.member_id, 'provisioning_status': o_constants.ACTIVE, 'operating_status': o_constants.NO_MONITOR} for m in pool.members] expected_status = { 'pools': [pool_status], 'loadbalancers': [{"id": pool.loadbalancer_id, "provisioning_status": o_constants.ACTIVE}], 'members': expected_member_status, 'listeners': expected_listener_status, 'healthmonitors': [expected_hm_status]} self._wait_for_status_and_validate(lb_data, [expected_status]) def _create_listener_and_validate(self, lb_data, pool_id=None, protocol_port=80, admin_state_up=True, protocol='TCP'): if pool_id: pool = self._get_pool_from_lb_data(lb_data, pool_id=pool_id) loadbalancer_id = pool.loadbalancer_id pool_id = pool.pool_id else: loadbalancer_id = lb_data['model'].loadbalancer_id pool_id = None m_listener = self._create_listener_model(loadbalancer_id, pool_id, protocol_port, protocol=protocol, admin_state_up=admin_state_up) lb_data['listeners'].append(m_listener) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.listener_create(m_listener) expected_status = { 'listeners': [{'id': m_listener.listener_id, 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'loadbalancers': [{'id': m_listener.loadbalancer_id, 'provisioning_status': "ACTIVE"}]} self._wait_for_status_and_validate(lb_data, [expected_status]) def 
_update_listener_and_validate(self, lb_data, protocol_port=80, admin_state_up=None, protocol='TCP'): m_listener = self._get_listener_from_lb_data( lb_data, protocol, protocol_port) self._o_driver_lib.update_loadbalancer_status.reset_mock() old_admin_state_up = m_listener.admin_state_up operating_status = 'ONLINE' if admin_state_up is not None: m_listener.admin_state_up = admin_state_up if not admin_state_up: operating_status = 'OFFLINE' m_listener.protocol = protocol self.ovn_driver.listener_update(m_listener, m_listener) pool_status = [{'id': m_listener.default_pool_id, 'provisioning_status': 'ACTIVE'}] expected_status = { 'listeners': [{'id': m_listener.listener_id, 'provisioning_status': 'ACTIVE', 'operating_status': operating_status}], 'loadbalancers': [{"id": m_listener.loadbalancer_id, "provisioning_status": "ACTIVE"}], 'pools': pool_status} if old_admin_state_up != m_listener.admin_state_up: if m_listener.admin_state_up: oper_status = o_constants.ONLINE else: oper_status = o_constants.OFFLINE expected_status['listeners'][0]['operating_status'] = oper_status self._wait_for_status_and_validate(lb_data, [expected_status]) def _delete_listener_and_validate(self, lb_data, protocol='TCP', protocol_port=80): m_listener = self._get_listener_from_lb_data( lb_data, protocol, protocol_port) lb_data['listeners'].remove(m_listener) self._o_driver_lib.update_loadbalancer_status.reset_mock() self.ovn_driver.listener_delete(m_listener) expected_status = { 'listeners': [{"id": m_listener.listener_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}], 'loadbalancers': [{"id": m_listener.loadbalancer_id, "provisioning_status": "ACTIVE"}]} self._wait_for_status_and_validate(lb_data, [expected_status]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/requirements.txt0000664000175100017510000000040215033037524031005 0ustar00mylesmyles# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/test_agent.py0000664000175100017510000003704115033037524030241 0ustar00mylesmyles# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
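# Functional tests for the OVN provider agent: they initialize the NB IDL # event handlers (logical router port and logical switch port events) # in-process and verify how load balancers get associated to logical # switches and routers as router interfaces change.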
import atexit import multiprocessing as mp from neutron.common import utils as n_utils from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import connection from ovn_octavia_provider import agent as ovn_agent from ovn_octavia_provider.common import config as ovn_config from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import event as ovn_event from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.ovsdb import impl_idl_ovn from ovn_octavia_provider.tests.functional import base as ovn_base class TestOvnOctaviaProviderAgent(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() self._initialize_ovn_da() def _initialize_ovn_da(self): # NOTE(mjozefcz): In theory this is separate process # with IDL running, but to make it easier for now # we can initialize this IDL here instead spawning # another process. ovn_config.register_opts() da_helper = ovn_helper.OvnProviderHelper() events = [ovn_event.LogicalRouterPortEvent(da_helper), ovn_event.LogicalSwitchPortUpdateEvent(da_helper)] ovn_nb_idl_for_events = impl_idl_ovn.OvnNbIdlForLb( event_lock_name='func_test') ovn_nb_idl_for_events.notify_handler.watch_events(events) c = connection.Connection(ovn_nb_idl_for_events, ovn_config.get_ovn_ovsdb_timeout()) ovn_nbdb_api = impl_idl_ovn.OvsdbNbOvnIdl(c) atexit.register(ovn_nbdb_api.ovsdb_connection.stop) def _test_lrp_event_handler(self, cascade=False): # Create Network N1 on router R1 and LBA on N1 network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) # Create Network N2, connect it to R1 network_N2 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id2, subnet2_id = self._create_subnet_from_net( network_N2, '10.0.1.0/24') port_address2, port_id2 = self._create_port_on_network(network_N2) # Create Network N3 network_N3 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id3, subnet3_id = self._create_subnet_from_net( network_N3, '10.0.2.0/24') port_address3, port_id3 = self._create_port_on_network(network_N3) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) self._attach_router_to_subnet(subnet2_id, r1_id) lba_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) # Check if LBA exists in N2 LS n_utils.wait_until_true( lambda: self._is_lb_associated_to_ls( lba_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + network_id2), timeout=10) lbb_data = self._create_load_balancer_and_validate( network_id3, subnet3_id, port_address3, port_id3, multiple_lb=True) # Add N3 to R1 self.l3_plugin.add_router_interface( self.context, lba_data[ ovn_const.LB_EXT_IDS_LR_REF_KEY][ len(ovn_const.LR_REF_KEY_HEADER):], {'subnet_id': lbb_data['vip_net_info'][1]}) # Check LBB exists on R1 n_utils.wait_until_true( lambda: self._is_lb_associated_to_lr( lbb_data['model'].loadbalancer_id, lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]), timeout=10) # Check LBA connected to N3 n_utils.wait_until_true( lambda: self._is_lb_associated_to_ls( lba_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + lbb_data['vip_net_info'][0]), timeout=10) # Check LBB connected to N1 n_utils.wait_until_true( lambda: self._is_lb_associated_to_ls( lbb_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + lba_data['vip_net_info'][0]), timeout=10) # Check LBB 
connected to N2 n_utils.wait_until_true( lambda: self._is_lb_associated_to_ls( lbb_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + network_id2), timeout=10) lbb_id = lbb_data['model'].loadbalancer_id if not cascade: # N3 removed from R1 self.l3_plugin.remove_router_interface( self.context, lba_data[ ovn_const.LB_EXT_IDS_LR_REF_KEY][ len(ovn_const.LR_REF_KEY_HEADER):], {'subnet_id': lbb_data['vip_net_info'][1]}) else: # Delete LBB Cascade self._delete_load_balancer_and_validate(lbb_data, cascade=True, multiple_lb=True) # Check LBB doesn't exists on R1 n_utils.wait_until_true( lambda: not self._is_lb_associated_to_lr( lbb_id, lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]), timeout=10) # Check LBB not connected to N1 n_utils.wait_until_true( lambda: not self._is_lb_associated_to_ls( lbb_id, ovn_const.LR_REF_KEY_HEADER + lba_data['vip_net_info'][0]), timeout=10) # Check LBB not connected to N2 n_utils.wait_until_true( lambda: not self._is_lb_associated_to_ls( lbb_id, ovn_const.LR_REF_KEY_HEADER + network_id2), timeout=10) def test_lrp_event_handler_with_interface_delete(self): self._test_lrp_event_handler() def test_lrp_event_handler_with_loadbalancer_cascade_delete(self): self._test_lrp_event_handler(cascade=True) def test_lrp_event_handler_lrp_with_external_gateway(self): # Create Network N1 on router R1 and LBA on N1 network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) lba_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) # Create provider network N2, connect it to R1 provider_net, provider_subnet = self._create_provider_network() self.l3_plugin.update_router( self.context, r1_id, {'router': { 'id': r1_id, 'external_gateway_info': { 'enable_snat': True, 'network_id': provider_net['network']['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.2', 'subnet_id': provider_subnet['subnet']['id']}]}}}) # Check if LBA doesn't exist in provider network LS n_utils.wait_until_true( lambda: not self._is_lb_associated_to_ls( lba_data['model'].loadbalancer_id, ovn_const.LR_REF_KEY_HEADER + provider_net['network']['id']), timeout=10) def test_fip_on_lb_vip(self): """This test checks if FIP on LB VIP is configured. This test validates if Load_Balancer VIP field consist Floating IP address that is configured on LB VIP port. """ network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) # Create a pool self._create_pool_and_validate(lb_data, "p1") pool_id = lb_data['pools'][0].pool_id # Create listener self._create_listener_and_validate(lb_data, pool_id, 80) # Create Member-1 and associate it with lb_data self._create_member_and_validate( lb_data, pool_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') # Create provider network. 
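# Then set it as the router external gateway and attach a floating IP to # the LB VIP port; the FIP is expected to show up as an additional entry # in the OVN LB vips column.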
e1, e1_s1 = self._create_provider_network() # Configure external_gateway for router router_id = lb_data['lr_ref'][8::] self.l3_plugin.update_router( self.context, router_id, {'router': { 'id': router_id, 'external_gateway_info': { 'enable_snat': True, 'network_id': e1['network']['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.2', 'subnet_id': e1_s1['subnet']['id']}]}}}) # Create floating IP on LB VIP port vip_port_id = lb_data['model'].vip_port_id vip_port = self.core_plugin.get_ports( self.context, filters={'id': [vip_port_id]})[0] self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'subnet_id': None, 'floating_ip_address': '100.0.0.20', 'port_id': vip_port['id']}}) # Validate if FIP is stored as VIP in LB lbs = self._get_loadbalancers() expected_vips = { '%s:80' % vip_port['fixed_ips'][0]['ip_address']: '10.0.0.10:80', '100.0.0.20:80': '10.0.0.10:80'} self.assertDictEqual(expected_vips, lbs[0].get('vips')) provider_net = 'neutron-%s' % e1['network']['id'] tenant_net = 'neutron-%s' % lb_data['model'].vip_network_id for ls in self.nb_api.tables['Logical_Switch'].rows.values(): if ls.name == tenant_net: # Make sure that LB1 is added to tenant network self.assertIn( lb_data['model'].loadbalancer_id, [lb.name for lb in ls.load_balancer]) elif ls.name == provider_net: # Make sure that LB1 is not added to provider net - e1 LS self.assertListEqual([], ls.load_balancer) def test_fip_on_lb_additional_vip(self): """This test checks if FIP on LB additional VIP is configured. This test validates if Load_Balancer additional VIP field consist Floating IP address that is configured on LB additional VIP port. """ network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) network_ida, subnet_ida = self._create_subnet_from_net( network_N1, '10.0.1.0/24') port_addressa, port_ida = self._create_port_on_network(network_N1) additional_vips_list = [{ 'ip_address': port_addressa, 'port_id': port_ida, 'network_id': network_ida, 'subnet_id': subnet_ida }] r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) self._attach_router_to_subnet(subnet_ida, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id, additional_vips=additional_vips_list) # Create a pool self._create_pool_and_validate(lb_data, "p1") pool_id = lb_data['pools'][0].pool_id # Create listener self._create_listener_and_validate(lb_data, pool_id, 80) # Create Member-1 and associate it with lb_data self._create_member_and_validate( lb_data, pool_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') # Create provider network. 
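# Same flow as above, but the floating IP is attached to the additional # VIP port, so the additional VIP and its FIP should both appear in the # OVN LB vips column.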
e1, e1_s1 = self._create_provider_network() # Configure external_gateway for router self.l3_plugin.update_router( self.context, r1_id, {'router': { 'id': r1_id, 'external_gateway_info': { 'enable_snat': True, 'network_id': e1['network']['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.2', 'subnet_id': e1_s1['subnet']['id']}]}}}) vip_port_id = lb_data['model'].vip_port_id vip_port = self.core_plugin.get_ports( self.context, filters={'id': [vip_port_id]})[0] # Create floating IP on LB additional VIP port addi_vip_port_id = lb_data['model'].additional_vips[0]['port_id'] vipaddi_vip_port = self.core_plugin.get_ports( self.context, filters={'id': [addi_vip_port_id]})[0] self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'subnet_id': None, 'floating_ip_address': '100.0.0.20', 'port_id': vipaddi_vip_port['id']}}) # Validate if FIP is stored as VIP in LB lbs = self._get_loadbalancers() expected_vips = { '%s:80' % vip_port['fixed_ips'][0]['ip_address']: '10.0.0.10:80', '%s:80' % vipaddi_vip_port['fixed_ips'][0]['ip_address']: '10.0.0.10:80', '100.0.0.20:80': '10.0.0.10:80'} self.assertDictEqual(expected_vips, lbs[0].get('vips')) provider_net = 'neutron-%s' % e1['network']['id'] tenant_net = 'neutron-%s' % lb_data['model'].vip_network_id for ls in self.nb_api.tables['Logical_Switch'].rows.values(): if ls.name == tenant_net: # Make sure that LB1 is added to tenant network self.assertIn( lb_data['model'].loadbalancer_id, [lb.name for lb in ls.load_balancer]) elif ls.name == provider_net: # Make sure that LB1 is not added to provider net - e1 LS self.assertListEqual([], ls.load_balancer) def test_agent_exit(self): exit_event = mp.Event() agent = mp.Process(target=ovn_agent.OvnProviderAgent, args=[exit_event]) agent.start() self.assertTrue(agent.is_alive()) exit_event.set() agent.join() self.assertFalse(agent.is_alive()) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/test_driver.py0000664000175100017510000007043015033037524030435 0ustar00mylesmyles# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
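# Functional tests for the OVN Octavia provider driver: they drive the # load balancer, pool, member, health monitor and listener workflows # through the driver API and validate the resulting OVN NB Load_Balancer # contents.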
from neutron_lib import constants as n_const from octavia_lib.api.drivers import exceptions as o_exceptions from octavia_lib.common import constants as o_constants from oslo_utils import uuidutils from ovn_octavia_provider.tests.functional import base as ovn_base class TestOvnOctaviaProviderDriver(ovn_base.TestOvnOctaviaBase): def test_loadbalancer(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) network_ida, subnet_ida = self._create_subnet_from_net( network_N1, '2001:db8:0:1::/64', ip_version=n_const.IP_VERSION_6) port_addressa, port_ida = self._create_port_on_network(network_N1) additional_vips_list = [{ 'ip_address': port_addressa, 'port_id': port_ida, 'network_id': network_ida, 'subnet_id': subnet_ida }] r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) self._attach_router_to_subnet(subnet_ida, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id, additional_vips=additional_vips_list) self._update_load_balancer_and_validate(lb_data, admin_state_up=False) self._update_load_balancer_and_validate(lb_data, admin_state_up=True) self._delete_load_balancer_and_validate(lb_data) # create load balance with admin state down lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id, admin_state_up=False) self._delete_load_balancer_and_validate(lb_data) def test_create_lb_custom_network(self): self._create_load_balancer_custom_lr_ls_and_validate( admin_state_up=True, create_router=True, force_retry_ls_to_lr_assoc=False) def test_create_lb_custom_network_retry(self): self._create_load_balancer_custom_lr_ls_and_validate( admin_state_up=True, create_router=True, force_retry_ls_to_lr_assoc=True) def test_delete_lb_on_nonexisting_lb(self): # LoadBalancer doesnt exist anymore, so just create a model and delete network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '19.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id, only_model=True) self.ovn_driver.loadbalancer_delete(lb_data['model']) expected_status = { 'loadbalancers': [{"id": lb_data['model'].loadbalancer_id, "provisioning_status": "DELETED", "operating_status": "OFFLINE"}], 'listeners': [], 'pools': [], 'members': [], } del lb_data['model'] self._wait_for_status_and_validate(lb_data, [expected_status]) def test_pool(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) self._create_pool_and_validate(lb_data, "p_TCP_1", protocol='TCP') self._update_pool_and_validate(lb_data, "p_TCP_1") self._create_pool_and_validate(lb_data, "p_UDP_1", protocol='UDP') 
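# The provider keeps one OVN Load_Balancer row per L4 protocol (see # _make_expected_lbs), so pools are exercised for TCP, UDP and SCTP.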
self._create_pool_and_validate(lb_data, "p_SCTP_1", protocol='SCTP') self._create_pool_and_validate(lb_data, "p_TCP_2", protocol='TCP') self._update_pool_and_validate(lb_data, "p_TCP_2", admin_state_up=False) self._update_pool_and_validate(lb_data, "p_TCP_2", admin_state_up=True) self._update_pool_and_validate(lb_data, "p_TCP_2", admin_state_up=False) self._create_pool_and_validate(lb_data, "p_UDP_2", protocol='UDP') self._create_pool_and_validate(lb_data, "p_SCTP_2", protocol='SCTP') self._delete_pool_and_validate(lb_data, "p_SCTP_1") self._delete_pool_and_validate(lb_data, "p_UDP_1") self._delete_pool_and_validate(lb_data, "p_TCP_1") self._delete_load_balancer_and_validate(lb_data) def test_member(self): net1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( net1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(net1) net2 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id2, subnet_id2 = self._create_subnet_from_net( net2, '20.0.0.0/24') self._create_port_on_network(net2) net3 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id3, subnet_id3 = self._create_subnet_from_net( net3, '30.0.0.0/24') self._create_port_on_network(net3) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) self._attach_router_to_subnet(subnet_id2, r1_id) self._attach_router_to_subnet(subnet_id3, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) # TCP Pool self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP') # UDP Pool self._create_pool_and_validate(lb_data, "p_UDP", protocol='UDP') # SCTP Pool self._create_pool_and_validate(lb_data, "p_SCTP", protocol='SCTP') pool_TCP_id = lb_data['pools'][0].pool_id pool_UDP_id = lb_data['pools'][1].pool_id pool_SCTP_id = lb_data['pools'][2].pool_id # Members for TCP Pool self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._update_member_and_validate(lb_data, pool_TCP_id, "10.0.0.10") self._update_member_and_validate(lb_data, pool_TCP_id, "10.0.0.10", admin_state_up=False) self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.11') # Members for UDP Pool self._create_member_and_validate( lb_data, pool_UDP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._update_member_and_validate(lb_data, pool_UDP_id, "10.0.0.10") self._update_member_and_validate(lb_data, pool_UDP_id, "10.0.0.10", admin_state_up=False) self._create_member_and_validate( lb_data, pool_UDP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.11') # Members for SCTP Pool self._create_member_and_validate( lb_data, pool_SCTP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._update_member_and_validate(lb_data, pool_SCTP_id, "10.0.0.10") self._update_member_and_validate(lb_data, pool_SCTP_id, "10.0.0.10", admin_state_up=False) self._create_member_and_validate( lb_data, pool_SCTP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.11') # Disable loadbalancer self._update_load_balancer_and_validate(lb_data, admin_state_up=False) # Enable loadbalancer back self._update_load_balancer_and_validate(lb_data, admin_state_up=True) # Delete members from TCP Pool self._delete_member_and_validate(lb_data, pool_TCP_id, lb_data['vip_net_info'][0], 
'10.0.0.10') self._delete_member_and_validate(lb_data, pool_TCP_id, lb_data['vip_net_info'][0], '10.0.0.11') # Add again member to TCP Pool self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._create_member_and_validate(lb_data, pool_TCP_id, subnet_id2, network_id2, '20.0.0.4') self._create_member_and_validate(lb_data, pool_TCP_id, subnet_id2, network_id2, '20.0.0.6') self._create_member_and_validate(lb_data, pool_TCP_id, subnet_id3, network_id3, '30.0.0.6') self._delete_member_and_validate(lb_data, pool_TCP_id, network_id2, '20.0.0.6') # Deleting the pool should also delete the members. self._delete_pool_and_validate(lb_data, "p_TCP") # Delete the whole LB. self._delete_load_balancer_and_validate(lb_data) def test_member_no_subnet(self): self._o_driver_lib.get_pool.return_value = None # Test creating Member without subnet and unknown pool m_member = self._create_member_model('pool_from_nowhere', None, '30.0.0.7', 80) self.assertRaises(o_exceptions.UnsupportedOptionError, self.ovn_driver.member_create, m_member) network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) # TCP Pool self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP') pool_TCP_id = lb_data['pools'][0].pool_id self._o_driver_lib.get_pool.return_value = lb_data['pools'][0] self._o_driver_lib.get_loadbalancer.return_value = lb_data['model'] # Test deleting a member without subnet self._create_member_and_validate( lb_data, pool_TCP_id, None, lb_data['vip_net_info'][0], '10.0.0.10', expected_subnet=lb_data['vip_net_info'][1]) self._delete_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][0], '10.0.0.10', remove_subnet_id=True) # Test update member without subnet self._create_member_and_validate( lb_data, pool_TCP_id, None, lb_data['vip_net_info'][0], '10.0.0.10', expected_subnet=lb_data['vip_net_info'][1]) self._update_member_and_validate( lb_data, pool_TCP_id, "10.0.0.10", remove_subnet_id=True) # Test creating a Member without subnet but with pool self._create_member_and_validate( lb_data, pool_TCP_id, None, lb_data['vip_net_info'][0], '10.0.0.11', expected_subnet=lb_data['vip_net_info'][1]) # Deleting the pool should also delete the members. self._delete_pool_and_validate(lb_data, "p_TCP") # Delete the whole LB. 
self._delete_load_balancer_and_validate(lb_data) def test_hm(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) network_ida, subnet_ida = self._create_subnet_from_net( network_N1, '2001:db8:0:1::/64', ip_version=n_const.IP_VERSION_6) port_addressa, port_ida = self._create_port_on_network(network_N1) additional_vips_list = [{ 'ip_address': port_addressa, 'port_id': port_ida, 'network_id': network_ida, 'subnet_id': subnet_ida }] r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) self._attach_router_to_subnet(subnet_ida, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id, additional_vips=additional_vips_list) # TCP Pool self._create_pool_and_validate(lb_data, "p_TCP", protocol=o_constants.PROTOCOL_TCP) pool_TCP_id = lb_data['pools'][0].pool_id # Members for TCP Pool self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.11') # CRUD health monitor. self._create_hm_and_validate(lb_data, pool_TCP_id, 'hm_TCP', 10, 30, 3, o_constants.HEALTH_MONITOR_TCP) self._update_hm_and_validate(lb_data, pool_TCP_id, admin_state_up=False) self._update_hm_and_validate(lb_data, pool_TCP_id, admin_state_up=True) self._delete_hm_and_validate(lb_data, pool_TCP_id) # Create to test delete over pool self._create_hm_and_validate(lb_data, pool_TCP_id, 'hm_TCP', 10, 30, 3, o_constants.HEALTH_MONITOR_TCP) # Deleting the pool should also delete the health monitor. 
self._delete_pool_and_validate(lb_data, "p_TCP") def test_listener(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.1.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) network_N2 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id2, subnet_id2 = self._create_subnet_from_net( network_N2, '20.1.0.0/24') self._create_port_on_network(network_N2) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) self._attach_router_to_subnet(subnet_id2, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP') self._create_pool_and_validate(lb_data, "p_UDP", protocol='UDP') self._create_pool_and_validate(lb_data, "p_SCTP", protocol='SCTP') pool_TCP_id = lb_data['pools'][0].pool_id pool_UDP_id = lb_data['pools'][1].pool_id pool_SCTP_id = lb_data['pools'][2].pool_id # Create member in TCP pool self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.1.0.4') self._create_member_and_validate( lb_data, pool_TCP_id, subnet_id2, network_id2, '20.1.0.4',) # Create member in UDP pool self._create_member_and_validate( lb_data, pool_UDP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.1.0.5') self._create_member_and_validate(lb_data, pool_UDP_id, subnet_id2, network_id2, '20.1.0.5') # Create member in SCTP pool self._create_member_and_validate( lb_data, pool_SCTP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.1.0.6') self._create_member_and_validate(lb_data, pool_SCTP_id, subnet_id2, network_id2, '20.1.0.6') # Play around first listener linked to TCP pool self._create_listener_and_validate( lb_data, pool_TCP_id, 80, protocol='TCP') self._update_listener_and_validate(lb_data, protocol_port=80) self._update_listener_and_validate( lb_data, protocol_port=80, admin_state_up=True) self._update_listener_and_validate( lb_data, protocol_port=80, admin_state_up=False) self._update_listener_and_validate( lb_data, protocol_port=80, admin_state_up=True) self._create_listener_and_validate( lb_data, pool_TCP_id, protocol_port=82, protocol='TCP') # Play around second listener linked to UDP pool self._create_listener_and_validate( lb_data, pool_UDP_id, 53, protocol='UDP') self._update_listener_and_validate(lb_data, 53, protocol='UDP') self._update_listener_and_validate( lb_data, protocol_port=53, protocol='UDP', admin_state_up=True) self._update_listener_and_validate( lb_data, protocol_port=53, protocol='UDP', admin_state_up=False) self._update_listener_and_validate( lb_data, protocol_port=53, protocol='UDP', admin_state_up=True) self._create_listener_and_validate( lb_data, pool_UDP_id, protocol_port=21, protocol='UDP') # Play around third listener linked to SCTP pool self._create_listener_and_validate( lb_data, pool_SCTP_id, 8081, protocol='SCTP') self._update_listener_and_validate(lb_data, 8081, protocol='SCTP') self._update_listener_and_validate( lb_data, protocol_port=8081, protocol='SCTP', admin_state_up=True) self._update_listener_and_validate( lb_data, protocol_port=8081, protocol='SCTP', admin_state_up=False) self._update_listener_and_validate( lb_data, protocol_port=8081, protocol='SCTP', admin_state_up=True) self._create_listener_and_validate( lb_data, pool_SCTP_id, protocol_port=8082, protocol='SCTP') # 
Delete listeners linked to TCP pool self._delete_listener_and_validate( lb_data, protocol_port=82, protocol='TCP') self._delete_listener_and_validate( lb_data, protocol_port=80, protocol='TCP') # Delete TCP pool members self._delete_member_and_validate(lb_data, pool_TCP_id, network_id2, '20.1.0.4') self._delete_member_and_validate(lb_data, pool_TCP_id, lb_data['vip_net_info'][0], '10.1.0.4') # Delete empty, TCP pool self._delete_pool_and_validate(lb_data, "p_TCP") # Delete the rest self._delete_load_balancer_and_validate(lb_data) def _test_cascade_delete(self, pool=True, listener=True, member=True): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) if pool: self._create_pool_and_validate(lb_data, "p_TCP", protocol='TCP') self._create_pool_and_validate(lb_data, "p_UDP", protocol='UDP') self._create_pool_and_validate(lb_data, "p_SCTP", protocol='SCTP') pool_TCP_id = lb_data['pools'][0].pool_id pool_UDP_id = lb_data['pools'][1].pool_id pool_SCTP_id = lb_data['pools'][2].pool_id if member: self._create_member_and_validate( lb_data, pool_TCP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._create_member_and_validate( lb_data, pool_UDP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self._create_member_and_validate( lb_data, pool_SCTP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') if listener: self._create_listener_and_validate( lb_data, pool_TCP_id, protocol_port=80, protocol='TCP') self._create_listener_and_validate( lb_data, pool_UDP_id, protocol_port=53, protocol='UDP') self._create_listener_and_validate( lb_data, pool_SCTP_id, protocol_port=8081, protocol='SCTP') self._delete_load_balancer_and_validate(lb_data, cascade=True) def test_lb_listener_pools_cascade(self): self._test_cascade_delete(member=False) def test_lb_pool_cascade(self): self._test_cascade_delete(member=False, listener=False) def test_cascade_delete(self): self._test_cascade_delete() def test_hm_unsupported_protocol(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) self._create_pool_and_validate(lb_data, "p_SCTP", protocol=o_constants.PROTOCOL_SCTP) pool_SCTP_id = lb_data['pools'][0].pool_id self._create_member_and_validate( lb_data, pool_SCTP_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') self.assertRaises(o_exceptions.UnsupportedOptionError, self._create_hm_and_validate, lb_data, pool_SCTP_id, 'hm_SCTP', 10, 30, 3, o_constants.HEALTH_MONITOR_SCTP) self._delete_load_balancer_and_validate(lb_data) def test_for_unsupported_options(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = 
self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) m_pool = self._create_pool_model(lb_data['model'].loadbalancer_id, 'lb1') m_pool.protocol = o_constants.PROTOCOL_HTTP self.assertRaises(o_exceptions.UnsupportedOptionError, self.ovn_driver.pool_create, m_pool) self.assertRaises(o_exceptions.UnsupportedOptionError, self.ovn_driver.loadbalancer_failover, lb_data['model'].loadbalancer_id) m_listener = self._create_listener_model( lb_data['model'].loadbalancer_id, m_pool.pool_id, 80) m_listener.protocol = o_constants.PROTOCOL_HTTP self.assertRaises(o_exceptions.UnsupportedOptionError, self.ovn_driver.listener_create, m_listener) self._create_listener_and_validate(lb_data) self._delete_load_balancer_and_validate(lb_data) def test_lb_listener_pool_workflow(self): network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) self._create_listener_and_validate(lb_data) self._create_pool_and_validate( lb_data, "p1", listener_id=lb_data['listeners'][0].listener_id) self._delete_pool_and_validate( lb_data, "p1", listener_id=lb_data['listeners'][0].listener_id) self._delete_listener_and_validate(lb_data) self._delete_load_balancer_and_validate(lb_data) def test_lb_member_batch_update(self): # Create a LoadBalancer network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) # Create LB lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) # Create a pool self._create_pool_and_validate(lb_data, "p1") pool_id = lb_data['pools'][0].pool_id # Create Member-1 and associate it with lb_data self._create_member_and_validate( lb_data, pool_id, lb_data['vip_net_info'][1], lb_data['vip_net_info'][0], '10.0.0.10') # Create Member-2 m_member = self._create_member_model(pool_id, lb_data['vip_net_info'][1], '10.0.0.12') # Update ovn's Logical switch reference self._update_ls_refs(lb_data, lb_data['vip_net_info'][0]) lb_data['pools'][0].members.append(m_member) # Add a new member to the LB members = [m_member] + [lb_data['pools'][0].members[0]] self._update_members_in_batch_and_validate(lb_data, pool_id, members) # Deleting one member, while keeping the other member available self._update_members_in_batch_and_validate(lb_data, pool_id, [m_member]) # Create Member-3 with monitor options m_member = self._create_member_model(pool_id, lb_data['vip_net_info'][1], '10.0.0.12') m_member.monitor_port = 8080 members = [m_member] self.assertRaises(o_exceptions.UnsupportedOptionError, self.ovn_driver.member_batch_update, pool_id, members) self._delete_load_balancer_and_validate(lb_data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 
ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/functional/test_integration.py0000664000175100017510000001700315033037524031462 0ustar00mylesmyles# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.common import utils from ovn_octavia_provider.tests.functional import base as ovn_base from neutron_lib.api.definitions import floating_ip_port_forwarding as pf_def from neutron_lib.utils import runtime from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils LOG = logging.getLogger(__name__) class TestOvnOctaviaProviderIntegration(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() # Add port_forwarding as a configured service plugin (if needed) svc_plugins = set(cfg.CONF.service_plugins) svc_plugins.add("port_forwarding") cfg.CONF.set_override("service_plugins", list(svc_plugins)) if not self.pf_plugin: # OVN does not use RPC: disable it for port-forwarding tests self.pf_plugin = self._load_port_forwarding_class() self.pf_plugin._rpc_notifications_required = False self.assertIsNotNone(self.pf_plugin, "TestOVNFunctionalBase is expected to have " "port forwarding plugin configured") @staticmethod def _load_port_forwarding_class(): """Load port forwarding plugin :returns: instance of plugin that is loaded :raises ImportError: if fails to load plugin """ try: loaded_class = runtime.load_class_by_alias_or_classname( 'neutron.service_plugins', 'port_forwarding') return loaded_class() except ImportError: with excutils.save_and_reraise_exception(): LOG.error("Error loading port_forwarding plugin") def _find_pf_lb(self, router_id, fip_id=None): result = [] for ovn_lb in self.nb_api.get_router_floatingip_lbs( utils.ovn_name(router_id)): ext_ids = ovn_lb.external_ids if not fip_id or fip_id == ext_ids[ovn_const.OVN_FIP_EXT_ID_KEY]: result.append(ovn_lb) return result or None def _loadbalancer_operation(self, lb_data=None, update=False, delete=False): if not lb_data: network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) network_id, subnet_id = self._create_subnet_from_net( network_N1, '10.0.0.0/24') port_address, port_id = self._create_port_on_network(network_N1) r1_id = self._create_router('r' + uuidutils.generate_uuid()[:4]) self._attach_router_to_subnet(subnet_id, r1_id) lb_data = self._create_load_balancer_and_validate( network_id, subnet_id, port_address, port_id, router_id=r1_id) if update: self._update_load_balancer_and_validate(lb_data, admin_state_up=False) self._update_load_balancer_and_validate(lb_data, admin_state_up=True) if delete: self._delete_load_balancer_and_validate(lb_data) return None if delete else lb_data def _validate_from_lb_data(self, lb_data): expected_lbs = self._make_expected_lbs(lb_data) self._validate_loadbalancers(expected_lbs) def test_port_forwarding(self): def _verify_pf_lb(test, protocol, vip_ext_port, vip_int_port): ovn_lbs = 
test._find_pf_lb(router_id, fip_id) test.assertEqual(len(ovn_lbs), 1) test.assertEqual(ovn_lbs[0].name, 'pf-floatingip-{}-{}'.format(fip_id, protocol)) self.assertEqual(ovn_lbs[0].vips, { '{}:{}'.format(fip_ip, vip_ext_port): '{}:{}'.format(p1_ip, vip_int_port)}) n1, s1 = self._create_provider_network() ext_net = n1['network'] ext_subnet = s1['subnet'] gw_info = { 'enable_snat': True, 'network_id': ext_net['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.2', 'subnet_id': ext_subnet['id']}]} # Create Network N2, connect it to router network_N1 = self._create_net('N' + uuidutils.generate_uuid()[:4]) n2_id, sub2_id = self._create_subnet_from_net( network_N1, "10.0.1.0/24") router_id = self._create_router('routertest', gw_info=gw_info) self._attach_router_to_subnet(sub2_id, router_id) p1_ip, p1_id = self._create_port_on_network(network_N1) fip_info = {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': ext_net['id'], 'port_id': None, 'fixed_ip_address': None}} fip = self.l3_plugin.create_floatingip(self.context, fip_info) fip_id = fip['id'] fip_ip = fip['floating_ip_address'] # Create floating ip port forwarding. This will create an # OVN load balancer fip_pf_args = { pf_def.EXTERNAL_PORT: 2222, pf_def.INTERNAL_PORT: 22, pf_def.INTERNAL_PORT_ID: p1_id, pf_def.PROTOCOL: 'tcp', pf_def.INTERNAL_IP_ADDRESS: p1_ip} fip_attrs = {pf_def.RESOURCE_NAME: {pf_def.RESOURCE_NAME: fip_pf_args}} pf_obj = self.pf_plugin.create_floatingip_port_forwarding( self.context, fip_id, **fip_attrs) # Check pf_lb with no octavia_provider_lb _verify_pf_lb(self, 'tcp', 2222, 22) # Create octavia_provider_lb lb_data = self._loadbalancer_operation() expected_lbs = self._make_expected_lbs(lb_data) _verify_pf_lb(self, 'tcp', 2222, 22) fip_pf_args2 = {pf_def.EXTERNAL_PORT: 5353, pf_def.INTERNAL_PORT: 53, pf_def.PROTOCOL: 'udp'} fip_attrs2 = {pf_def.RESOURCE_NAME: { pf_def.RESOURCE_NAME: fip_pf_args2}} self.pf_plugin.update_floatingip_port_forwarding( self.context, pf_obj['id'], fip_id, **fip_attrs2) # Make sure octavia_provider_lb is not disturbed self._validate_loadbalancers(expected_lbs) # Update octavia_provider_lb self._loadbalancer_operation(lb_data, update=True) _verify_pf_lb(self, 'udp', 5353, 53) # Delete octavia_provider_lb self._loadbalancer_operation(lb_data, delete=True) _verify_pf_lb(self, 'udp', 5353, 53) # Delete pf_lb after creating octavia_provider_lb lb_data = self._loadbalancer_operation() expected_lbs = self._make_expected_lbs(lb_data) self.pf_plugin.delete_floatingip_port_forwarding( self.context, pf_obj['id'], fip_id) self._loadbalancer_operation(lb_data, update=True) self.assertIsNone(self._find_pf_lb(router_id, fip_id)) # Make sure octavia_provider_lb is not disturbed self._validate_loadbalancers(expected_lbs) self._loadbalancer_operation(lb_data, delete=True) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5169845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/0000775000175100017510000000000015033037526024344 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/__init__.py0000664000175100017510000000000015033037524026441 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/base.py0000664000175100017510000000570115033037524025631 0ustar00mylesmyles# # Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from neutron.tests import base from octavia_lib.api.drivers import driver_lib from oslo_utils import uuidutils class TestOvnOctaviaBase(base.BaseTestCase): def setUp(self): super().setUp() self.listener_id = uuidutils.generate_uuid() self.loadbalancer_id = uuidutils.generate_uuid() self.pool_id = uuidutils.generate_uuid() self.member_id = uuidutils.generate_uuid() self.member_subnet_id = uuidutils.generate_uuid() self.member_port = '1010' self.member_pool_id = self.pool_id self.member_address = '192.168.2.149' self.port1_id = uuidutils.generate_uuid() self.port2_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.vip_network_id = uuidutils.generate_uuid() self.vip_port_id = uuidutils.generate_uuid() self.vip_subnet_id = uuidutils.generate_uuid() self.healthmonitor_id = uuidutils.generate_uuid() ovn_nb_idl = mock.patch( 'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvnNbIdlForLb') self.mock_ovn_nb_idl = ovn_nb_idl.start() ovn_sb_idl = mock.patch( 'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvnSbIdlForLb') self.mock_ovn_sb_idl = ovn_sb_idl.start() ovsdb_nb_idl = mock.patch( 'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvsdbNbOvnIdl') self.mock_ovsdb_nb_idl = ovsdb_nb_idl.start() connection = mock.patch( 'ovsdbapp.backend.ovs_idl.connection.Connection') self.mock_connection = connection.start() self.member_address = '192.168.2.149' self.vip_address = '192.148.210.109' self.vip_dict = {'vip_network_id': uuidutils.generate_uuid(), 'vip_subnet_id': uuidutils.generate_uuid()} self.vip_output = {'vip_network_id': self.vip_dict['vip_network_id'], 'vip_subnet_id': self.vip_dict['vip_subnet_id']} self.additional_vips = [{ 'ip_address': '2001:db8:0:1::12', 'network_id': self.vip_dict['vip_network_id'], 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid() }] mock.patch( 'ovsdbapp.backend.ovs_idl.idlutils.get_schema_helper').start() mock.patch.object( driver_lib.DriverLibrary, '_check_for_socket_ready').start() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5169845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/common/0000775000175100017510000000000015033037526025634 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/common/__init__.py0000664000175100017510000000000015033037524027731 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/common/test_clients.py0000664000175100017510000001620315033037524030706 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from keystoneauth1 import exceptions as ks_exceptions from octavia_lib.api.drivers import exceptions as driver_exceptions from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslotest import base from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config class TestKeystoneSession(base.BaseTestCase): def setUp(self): super().setUp() config.register_opts() self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) @mock.patch( 'keystoneauth1.loading.load_auth_from_conf_options') def test_auth(self, kl_auth): missing_options = [mock.Mock(dest='username')] auth = mock.Mock() # service_auth with missing option kl_auth.side_effect = [ ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options) ] ksession = clients.KeystoneSession() self.assertRaises( ks_exceptions.auth_plugins.MissingRequiredOptions, lambda: ksession.auth) # neutron with missing option, missing option also in service_auth kl_auth.reset_mock() kl_auth.side_effect = [ ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options), auth, ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options), ] ksession = clients.KeystoneSession('neutron') self.assertRaises( ks_exceptions.auth_plugins.MissingRequiredOptions, lambda: ksession.auth) # neutron with missing option, it is copied from service_auth kl_auth.reset_mock() kl_auth.side_effect = [ ks_exceptions.auth_plugins.MissingRequiredOptions(missing_options), auth, auth, ] self.conf.config(group='service_auth', endpoint_override='foo') ksession = clients.KeystoneSession('neutron') self.assertEqual(ksession.auth, auth) self.assertEqual(cfg.CONF.neutron.endpoint_override, 'foo') @mock.patch( 'keystoneauth1.loading.load_session_from_conf_options') @mock.patch( 'keystoneauth1.loading.load_auth_from_conf_options') def test_cached_session(self, kl_auth, kl_session): ksession = clients.KeystoneSession('neutron') self.assertIs( ksession.session, ksession.session) kl_session.assert_called_once_with( mock.ANY, 'neutron', auth=ksession.auth) @mock.patch( 'keystoneauth1.loading.load_auth_from_conf_options') def test_cached_auth(self, kl): ksession = clients.KeystoneSession('neutron') self.assertIs( ksession.auth, ksession.auth) kl.assert_called_once_with(mock.ANY, 'neutron') class TestNeutronAuth(base.BaseTestCase): def setUp(self): super().setUp() config.register_opts() self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='neutron', region_name='RegionOne', valid_interfaces='internal') self.mock_client = mock.patch( 'openstack.connection.Connection').start() clients.Singleton._instances = {} @mock.patch.object(clients, 'KeystoneSession') def test_init(self, mock_ks): clients.NeutronAuth() self.mock_client.assert_called_once_with( session=mock_ks().session, interface='internal', region_name='RegionOne') def test_singleton(self): c1 = clients.NeutronAuth() c2 = clients.NeutronAuth() self.assertIs(c1, c2) def test_singleton_exception(self): mock_client = mock.Mock() mock_client.network_proxy = 'foo' with mock.patch( 'openstack.connection.Connection', 
side_effect=[RuntimeError, mock_client, mock_client]) as os_sdk: self.assertRaises( RuntimeError, clients.NeutronAuth) c2 = clients.NeutronAuth() c3 = clients.NeutronAuth() self.assertIs(c2, c3) self.assertEqual(os_sdk._mock_call_count, 2) @mock.patch.object(clients, 'KeystoneSession') def test_get_client(self, mock_ks): clients.get_neutron_client() self.mock_client.assert_called_once_with( session=mock_ks().session, interface='internal', region_name='RegionOne') @mock.patch.object(clients, 'NeutronAuth', side_effect=[RuntimeError]) def test_get_client_error(self, mock_ks): msg = self.assertRaises( driver_exceptions.DriverError, clients.get_neutron_client) self.assertEqual("An unknown driver error occurred.", str(msg)) class TestOctaviaAuth(base.BaseTestCase): def setUp(self): super().setUp() config.register_opts() self.mock_client = mock.patch( 'openstack.connection.Connection').start() clients.Singleton._instances = {} @mock.patch.object(clients, 'KeystoneSession') @mock.patch('openstack.connection.Connection') def test_init(self, mock_conn, mock_ks): clients.OctaviaAuth() mock_conn.assert_called_once_with( session=mock_ks().session, region_name=mock.ANY ) def test_singleton(self): c1 = clients.OctaviaAuth() c2 = clients.OctaviaAuth() self.assertIs(c1, c2) def test_singleton_exception(self): mock_client = mock.Mock() mock_client.lbaas_proxy = 'foo' with mock.patch( 'openstack.connection.Connection', side_effect=[RuntimeError, mock_client, mock_client]) as os_sdk: self.assertRaises( RuntimeError, clients.OctaviaAuth) c2 = clients.OctaviaAuth() c3 = clients.OctaviaAuth() self.assertIs(c2, c3) self.assertEqual(os_sdk._mock_call_count, 2) @mock.patch.object(clients, 'KeystoneSession') @mock.patch('openstack.connection.Connection') def test_get_client(self, mock_conn, mock_ks): clients.get_octavia_client() mock_conn.assert_called_once_with( session=mock_ks().session, region_name=mock.ANY ) @mock.patch.object(clients, 'OctaviaAuth', side_effect=[RuntimeError]) def test_get_client_error(self, mock_ks): msg = self.assertRaises( driver_exceptions.DriverError, clients.get_octavia_client) self.assertEqual("An unknown driver error occurred.", str(msg)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/common/test_utils.py0000664000175100017510000000415315033037524030406 0ustar00mylesmyles# Copyright 2022 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from unittest import mock from neutron.tests import base from ovn_octavia_provider.common import config as ovn_config from ovn_octavia_provider.common import utils class TestRetryDecorator(base.BaseTestCase): DEFAULT_RETRY_VALUE = 10 def setUp(self): super().setUp() mock.patch.object( ovn_config, "get_ovn_ovsdb_retry_max_interval", return_value=self.DEFAULT_RETRY_VALUE).start() def test_default_retry_value(self): with mock.patch('tenacity.wait_exponential') as m_wait: @utils.retry() def decorated_method(): pass decorated_method() m_wait.assert_called_with(max=self.DEFAULT_RETRY_VALUE) def test_custom_retry_value(self): custom_value = 3 with mock.patch('tenacity.wait_exponential') as m_wait: @utils.retry(max_=custom_value) def decorated_method(): pass decorated_method() m_wait.assert_called_with(max=custom_value) def test_positive_result(self): number_of_exceptions = 3 method = mock.Mock( side_effect=[Exception() for i in range(number_of_exceptions)]) @utils.retry(max_=0.001) def decorated_method(): try: method() except StopIteration: return decorated_method() # number of exceptions + one successful call self.assertEqual(number_of_exceptions + 1, method.call_count) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/fakes.py0000664000175100017510000002506115033037524026011 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy from unittest import mock from octavia_lib.api.drivers import data_models from oslo_utils import uuidutils from ovn_octavia_provider.common import constants from ovn_octavia_provider.common import utils class FakeResource(dict): def __init__(self, manager=None, info=None, loaded=False, methods=None): """Set attributes and methods for a resource. :param manager: The resource manager :param Dictionary info: A dictionary with all attributes :param bool loaded: True if the resource is loaded in memory :param Dictionary methods: A dictionary with all methods """ info = info or {} super().__init__(info) methods = methods or {} self.__name__ = type(self).__name__ self.manager = manager self._info = info self._add_details(info) self._add_methods(methods) self._loaded = loaded # Add a revision number by default setattr(self, 'revision_number', 1) @property def db_obj(self): return self def _add_details(self, info): for (k, v) in info.items(): setattr(self, k, v) def _add_methods(self, methods): """Fake methods with MagicMock objects. For each <@key, @value> pairs in methods, add an callable MagicMock object named @key as an attribute, and set the mock's return_value to @value. When users access the attribute with (), @value will be returned, which looks like a function call. 
""" for (name, ret) in methods.items(): method = mock.MagicMock(return_value=ret) setattr(self, name, method) def __repr__(self): reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and k != 'manager') info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) return "<%s %s>" % (self.__class__.__name__, info) def keys(self): return self._info.keys() def info(self): return self._info def update(self, info): super().update(info) self._add_details(info) class FakeOpenStackSDKResource(FakeResource): def __getattr__(self, attr): if attr in self._info: return self._info[attr] else: raise AttributeError("No such attribute '{}'".format(attr)) class FakeOvsdbRow(FakeResource): """Fake one or more OVSDB rows.""" @staticmethod def create_one_ovsdb_row(attrs=None, methods=None): """Create a fake OVSDB row. :param Dictionary attrs: A dictionary with all attributes :param Dictionary methods: A dictionary with all methods :return: A FakeResource object faking the OVSDB row """ attrs = attrs or {} methods = methods or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() ovsdb_row_attrs = { 'uuid': fake_uuid, 'name': 'name-' + fake_uuid, 'external_ids': {}, } # Set default methods. ovsdb_row_methods = { 'addvalue': None, 'delete': None, 'delvalue': None, 'verify': None, 'setkey': None, } # Overwrite default attributes and methods. ovsdb_row_attrs.update(attrs) ovsdb_row_methods.update(methods) return FakeResource(info=copy.deepcopy(ovsdb_row_attrs), loaded=True, methods=copy.deepcopy(ovsdb_row_methods)) class FakeSubnet(): """Fake one or more subnets.""" @staticmethod def create_one_subnet(attrs=None): """Create a fake subnet. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the subnet """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() subnet_attrs = { 'id': 'subnet-id-' + fake_uuid, 'name': 'subnet-name-' + fake_uuid, 'network_id': 'network-id-' + fake_uuid, 'cidr': '10.10.10.0/24', 'tenant_id': 'project-id-' + fake_uuid, 'enable_dhcp': True, 'dns_nameservers': [], 'allocation_pools': [], 'host_routes': [], 'ip_version': 4, 'gateway_ip': '10.10.10.1', 'ipv6_address_mode': 'None', 'ipv6_ra_mode': 'None', 'subnetpool_id': None, } # Overwrite default attributes. subnet_attrs.update(attrs) return FakeOpenStackSDKResource(info=copy.deepcopy(subnet_attrs), loaded=True) class FakeOVNPort(): """Fake one or more ports.""" @staticmethod def create_one_port(attrs=None): """Create a fake ovn port. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the port """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() port_attrs = { 'addresses': [], 'dhcpv4_options': '', 'dhcpv6_options': [], 'enabled': True, 'external_ids': {}, 'name': fake_uuid, 'options': {}, 'parent_name': [], 'port_security': [], 'tag': [], 'tag_request': [], 'type': '', 'up': False, } # Overwrite default attributes. 
port_attrs.update(attrs) return type('Logical_Switch_Port', (object, ), port_attrs) @staticmethod def from_neutron_port(port): """Create a fake ovn port based on a neutron port.""" external_ids = { constants.OVN_NETWORK_NAME_EXT_ID_KEY: utils.ovn_name(port['network_id']), constants.OVN_SG_IDS_EXT_ID_KEY: ' '.join(port['security_groups']), constants.OVN_DEVICE_OWNER_EXT_ID_KEY: port.get('device_owner', '')} addresses = [port['mac_address'], ] addresses += [x['ip_address'] for x in port.get('fixed_ips', [])] port_security = ( addresses + [x['ip_address'] for x in port.get('allowed_address_pairs', [])]) return FakeOVNPort.create_one_port( {'external_ids': external_ids, 'addresses': addresses, 'port_security': port_security}) class FakeOVNRouter(): @staticmethod def create_one_router(attrs=None): router_attrs = { 'enabled': False, 'external_ids': {}, 'load_balancer': [], 'name': '', 'nat': [], 'options': {}, 'ports': [], 'static_routes': [], } # Overwrite default attributes. router_attrs.update(attrs) return type('Logical_Router', (object, ), router_attrs) class FakeOVNLB(): @staticmethod def create_one_lb(attrs=None): fake_uuid = uuidutils.generate_uuid() lb_attrs = { 'uuid': fake_uuid, 'external_ids': {}, 'health_check': [], 'ip_port_mappings': {}, 'name': '', 'options': {}, 'protocol': 'tcp', 'selection_fields': [], 'vips': {} } # Overwrite default attributes. lb_attrs.update(attrs) return type('Load_Balancer', (object, ), lb_attrs) class FakePort(): """Fake one or more ports.""" @staticmethod def create_one_port(attrs=None): """Create a fake port. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the port """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() port_attrs = { 'admin_state_up': True, 'allowed_address_pairs': [{}], 'binding:host_id': 'binding-host-id-' + fake_uuid, 'binding:profile': {}, 'binding:vif_details': {}, 'binding:vif_type': 'ovs', 'binding:vnic_type': 'normal', 'device_id': 'device-id-' + fake_uuid, 'device_owner': 'compute:nova', 'dns_assignment': [{}], 'dns_name': 'dns-name-' + fake_uuid, 'extra_dhcp_opts': [{}], 'fixed_ips': [{'subnet_id': 'subnet-id-' + fake_uuid, 'ip_address': '10.10.10.20'}], 'id': 'port-id-' + fake_uuid, 'mac_address': 'fa:16:3e:a9:4e:72', 'name': 'port-name-' + fake_uuid, 'network_id': 'network-id-' + fake_uuid, 'port_security_enabled': True, 'security_groups': [], 'status': 'ACTIVE', 'tenant_id': 'project-id-' + fake_uuid, } # Overwrite default attributes. 
port_attrs.update(attrs) return FakeOpenStackSDKResource(info=copy.deepcopy(port_attrs), loaded=True) class FakeLB(data_models.LoadBalancer): def __init__(self, *args, **kwargs): self.external_ids = kwargs.pop('ext_ids') self.uuid = kwargs.pop('uuid') super().__init__(*args, **kwargs) def __hash__(self): # Required for Python3, not for Python2 return self.__sizeof__() class FakePool(data_models.Pool): def __init__(self, *args, **kwargs): self.uuid = kwargs.pop('uuid') super().__init__(*args, **kwargs) def __hash__(self): # Required for Python3, not for Python2 return self.__sizeof__() class FakeMember(data_models.Member): def __init__(self, *args, **kwargs): self.uuid = kwargs.pop('uuid') super().__init__(*args, **kwargs) def __hash__(self): # Required for Python3, not for Python2 return self.__sizeof__() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5169845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/hacking/0000775000175100017510000000000015033037526025750 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/hacking/__init__.py0000664000175100017510000000000015033037524030045 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/hacking/test_checks.py0000664000175100017510000002703715033037524030630 0ustar00mylesmyles# Copyright 2015 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from oslotest import base from ovn_octavia_provider.hacking import checks class HackingTestCase(base.BaseTestCase): """Hacking test class. This class tests the hacking checks in ovn_octavia_provider.hacking.checks by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. 
If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. """ def assertLinePasses(self, func, *args): with testtools.ExpectedException(StopIteration): next(func(*args)) def assertLineFails(self, func, *args): self.assertIsInstance(next(func(*args)), tuple) def test_assert_called_once_with(self): fail_code1 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assertCalledOnceWith() """ fail_code2 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.called_once_with() """ fail_code3 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_has_called() """ pass_code = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_called_once_with() """ pass_code2 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_has_calls() """ self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code1, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code2, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code3, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code2, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code2, "ovn_octavia_provider/tests_fake/test_assert.py")))) def test_asserttruefalse(self): true_fail_code1 = """ test_bool = True self.assertEqual(True, test_bool) """ true_fail_code2 = """ test_bool = True self.assertEqual(test_bool, True) """ true_pass_code = """ test_bool = True self.assertTrue(test_bool) """ false_fail_code1 = """ test_bool = False self.assertEqual(False, test_bool) """ false_fail_code2 = """ test_bool = False self.assertEqual(test_bool, False) """ false_pass_code = """ test_bool = False self.assertFalse(test_bool) """ self.assertEqual( 1, len(list(checks.check_asserttruefalse(true_fail_code1, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_asserttruefalse(true_fail_code2, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_asserttruefalse(true_pass_code, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_asserttruefalse(false_fail_code1, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_asserttruefalse(false_fail_code2, "ovn_octavia_provider/tests/test_assert.py")))) self.assertFalse( list(checks.check_asserttruefalse(false_pass_code, "ovn_octavia_provider/tests/test_assert.py"))) self.assertEqual( 0, len(list(checks.check_asserttruefalse( true_pass_code, "ovn_octavia_provider/tests_fake/test_assert.py")))) def test_assertempty(self): fail_code = """ test_empty = %s self.assertEqual(test_empty, %s) """ pass_code1 = """ test_empty = %s self.assertEqual(%s, test_empty) """ pass_code2 = """ self.assertEqual(123, foo(abc, %s)) """ empty_cases = ['{}', '[]', '""', "''", '()', 'set()'] for ec in empty_cases: self.assertEqual( 1, len(list(checks.check_assertempty(fail_code % (ec, ec), "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_asserttruefalse(pass_code1 % 
(ec, ec), "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_asserttruefalse(pass_code2 % ec, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list( checks.check_asserttruefalse( pass_code2 % ec, "ovn_octavia_provider/tests_fake/test_assert.py")))) def test_assertisinstance(self): fail_code = """ self.assertTrue(isinstance(observed, ANY_TYPE)) """ pass_code1 = """ self.assertEqual(ANY_TYPE, type(observed)) """ pass_code2 = """ self.assertIsInstance(observed, ANY_TYPE) """ self.assertEqual( 1, len(list(checks.check_assertisinstance(fail_code, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertisinstance(pass_code1, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertisinstance(pass_code2, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertisinstance(pass_code2, "ovn_octavia_provider/tests_fake/test_assert.py")))) def test_assertequal_for_httpcode(self): fail_code = """ self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) """ pass_code = """ self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) """ self.assertEqual( 1, len(list(checks.check_assertequal_for_httpcode(fail_code, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertequal_for_httpcode(pass_code, "ovn_octavia_provider/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertequal_for_httpcode(pass_code, "ovn_octavia_provider/tests_fake/test_assert.py")))) def test_check_no_imports_from_tests(self): fail_codes = ('from ovn_octavia_provider import tests', 'from ovn_octavia_provider.tests import base', 'import ovn_octavia_provider.tests.base') for fail_code in fail_codes: self.assertEqual( 1, len(list(checks.check_no_imports_from_tests(fail_code, "ovn_octavia_provider/common/utils.py")))) self.assertEqual( 0, len(list(checks.check_no_imports_from_tests(fail_code, "ovn_octavia_provider/tests/test_fake.py")))) def test_check_python3_filter(self): f = checks.check_python3_no_filter self.assertLineFails(f, "filter(lambda obj: test(obj), data)") self.assertLinePasses(f, "[obj for obj in data if test(obj)]") self.assertLinePasses(f, "filter(function, range(0,10))") self.assertLinePasses(f, "lambda x, y: x+y") def test_check_no_import_mock(self): pass_line = 'from unittest import mock' fail_lines = ('import mock', 'import mock as mock_lib', 'from mock import patch') self.assertEqual( 0, len(list( checks.check_no_import_mock( pass_line, "ovn_octavia_provider/tests/test_fake.py", None)))) for fail_line in fail_lines: self.assertEqual( 0, len(list( checks.check_no_import_mock( fail_line, "ovn_octavia_provider/common/utils.py", None)))) self.assertEqual( 1, len(list( checks.check_no_import_mock( fail_line, "ovn_octavia_provider/tests/test_fake.py", None)))) self.assertEqual( 0, len(list( checks.check_no_import_mock( fail_line, "ovn_octavia_provider/tests/test_fake.py", True)))) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5169845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/ovsdb/0000775000175100017510000000000015033037526025461 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/ovsdb/__init__.py0000664000175100017510000000000015033037524027556 
0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/ovsdb/test_impl_idl_ovn.py0000664000175100017510000001020215033037524031536 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import os from unittest import mock from neutron.tests import base from ovs.db import idl as ovs_idl from ovsdbapp.backend import ovs_idl as real_ovs_idl from ovsdbapp.backend.ovs_idl import idlutils from ovn_octavia_provider.common import config as ovn_config from ovn_octavia_provider.ovsdb import impl_idl_ovn basedir = os.path.dirname(os.path.abspath(__file__)) schema_files = { 'OVN_Northbound': os.path.join(basedir, '..', 'schemas', 'ovn-nb.ovsschema'), 'OVN_Southbound': os.path.join(basedir, '..', 'schemas', 'ovn-sb.ovsschema')} class TestOvnNbIdlForLb(base.BaseTestCase): def setUp(self): super().setUp() ovn_config.register_opts() self.mock_gsh = mock.patch.object( idlutils, 'get_schema_helper', side_effect=lambda x, y: ovs_idl.SchemaHelper( location=schema_files['OVN_Northbound'])).start() self.idl = impl_idl_ovn.OvnNbIdlForLb() def test__get_ovsdb_helper(self): self.mock_gsh.reset_mock() self.idl._get_ovsdb_helper('foo') self.mock_gsh.assert_called_once_with('foo', 'OVN_Northbound') def test_setlock(self): with mock.patch.object(impl_idl_ovn.OvnNbIdlForLb, 'set_lock') as set_lock: self.idl = impl_idl_ovn.OvnNbIdlForLb(event_lock_name='foo') set_lock.assert_called_once_with('foo') class TestOvnSbIdlForLb(base.BaseTestCase): def setUp(self): super().setUp() ovn_config.register_opts() self.mock_gsh = mock.patch.object( idlutils, 'get_schema_helper', side_effect=lambda x, y: ovs_idl.SchemaHelper( location=schema_files['OVN_Southbound'])).start() self.idl = impl_idl_ovn.OvnSbIdlForLb() @mock.patch.object(real_ovs_idl.Backend, 'autocreate_indices', mock.Mock(), create=True) def test_start_reuses_connection(self): with mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection', side_effect=lambda x, timeout: mock.Mock()): idl1 = impl_idl_ovn.OvnSbIdlForLb() ret1 = idl1.start() id1 = id(ret1.ovsdb_connection) idl2 = impl_idl_ovn.OvnSbIdlForLb() ret2 = idl2.start() id2 = id(ret2.ovsdb_connection) self.assertEqual(id1, id2) @mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection') def test_stop(self, mock_conn): mock_conn.stop.return_value = False with ( mock.patch.object( self.idl.notify_handler, 'shutdown')) as mock_notify, ( mock.patch.object(self.idl, 'close')) as mock_close: self.idl.start() self.idl.stop() mock_notify.assert_called_once_with() mock_close.assert_called_once_with() @mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection') def test_stop_no_connection(self, mock_conn): mock_conn.stop.return_value = False with ( mock.patch.object( self.idl.notify_handler, 'shutdown')) as mock_notify, ( mock.patch.object(self.idl, 'close')) as mock_close: self.idl.stop() mock_notify.assert_called_once_with() mock_close.assert_called_once_with() def test_setlock(self): with 
mock.patch.object(impl_idl_ovn.OvnSbIdlForLb, 'set_lock') as set_lock: self.idl = impl_idl_ovn.OvnSbIdlForLb(event_lock_name='foo') set_lock.assert_called_once_with('foo') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/ovsdb/test_ovsdb_monitor.py0000664000175100017510000000714415033037524031762 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from oslo_config import cfg import testscenarios from ovn_octavia_provider.ovsdb import ovsdb_monitor from ovn_octavia_provider.tests.unit import base as base_test OPTS = ('ovn_nb_private_key', 'ovn_nb_certificate', 'ovn_nb_ca_cert', 'ovn_sb_private_key', 'ovn_sb_certificate', 'ovn_sb_ca_cert') class TestOvsdbMonitor(testscenarios.WithScenarios, base_test.TestOvnOctaviaBase): scenarios = [ ('OVN_Northbound', {'schema': 'OVN_Northbound', 'private_key': 'ovn_nb_private_key', 'certificate': 'ovn_nb_certificate', 'ca_cert': 'ovn_nb_ca_cert'}), ('OVN_Southbound', {'schema': 'OVN_Southbound', 'private_key': 'ovn_sb_private_key', 'certificate': 'ovn_sb_certificate', 'ca_cert': 'ovn_sb_ca_cert'}) ] def setUp(self): super().setUp() self._register_opts() self.mock_os_path = mock.patch('os.path.exists').start() self.mock_stream = mock.patch.object(ovsdb_monitor, 'Stream').start() @staticmethod def _register_opts(): for opt in OPTS: try: cfg.CONF.register_opt(cfg.StrOpt(opt), group='ovn') except cfg.DuplicateOptError: pass def test_set_ssl(self): cfg.CONF.set_override(self.private_key, 'key', group='ovn') cfg.CONF.set_override(self.certificate, 'cert', group='ovn') cfg.CONF.set_override(self.ca_cert, 'ca-cert', group='ovn') self.mock_os_path.return_value = True ovsdb_monitor.check_and_set_ssl_files(self.schema) self.mock_stream.ssl_set_private_key_file.assert_called_with('key') self.mock_stream.ssl_set_certificate_file.assert_called_with('cert') self.mock_stream.ssl_set_ca_cert_file.assert_called_with('ca-cert') def test_no_key_and_certs(self): cfg.CONF.set_override(self.private_key, '', group='ovn') cfg.CONF.set_override(self.certificate, '', group='ovn') cfg.CONF.set_override(self.ca_cert, '', group='ovn') self.mock_os_path.return_value = False ovsdb_monitor.check_and_set_ssl_files(self.schema) self.mock_stream.ssl_set_private_key_file.assert_not_called() self.mock_stream.ssl_set_certificate_file.assert_not_called() self.mock_stream.ssl_set_ca_cert_file.assert_not_called() def test_no_nonexisting_files(self): cfg.CONF.set_override(self.private_key, 'key', group='ovn') cfg.CONF.set_override(self.certificate, 'cert', group='ovn') cfg.CONF.set_override(self.ca_cert, 'ca-cert', group='ovn') self.mock_os_path.return_value = False with self.assertLogs(): ovsdb_monitor.check_and_set_ssl_files(self.schema) self.mock_stream.ssl_set_private_key_file.assert_not_called() self.mock_stream.ssl_set_certificate_file.assert_not_called() self.mock_stream.ssl_set_ca_cert_file.assert_not_called() 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5169845 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/schemas/0000775000175100017510000000000015033037526025767 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema0000664000175100017510000006235615033037524031112 0ustar00mylesmyles{ "name": "OVN_Northbound", "version": "5.23.0", "cksum": "111023208 25806", "tables": { "NB_Global": { "columns": { "name": {"type": "string"}, "nb_cfg": {"type": {"key": "integer"}}, "sb_cfg": {"type": {"key": "integer"}}, "hv_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "connections": { "type": {"key": {"type": "uuid", "refTable": "Connection"}, "min": 0, "max": "unlimited"}}, "ssl": { "type": {"key": {"type": "uuid", "refTable": "SSL"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ipsec": {"type": "boolean"}}, "maxRows": 1, "isRoot": true}, "Logical_Switch": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Switch_Port", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "acls": {"type": {"key": {"type": "uuid", "refTable": "ACL", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "qos_rules": {"type": {"key": {"type": "uuid", "refTable": "QoS", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "load_balancer": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "dns_records": {"type": {"key": {"type": "uuid", "refTable": "DNS", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "other_config": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "forwarding_groups": { "type": {"key": {"type": "uuid", "refTable": "Forwarding_Group", "refType": "strong"}, "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_Switch_Port": { "columns": { "name": {"type": "string"}, "type": {"type": "string"}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, "tag_request": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4095}, "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "dynamic_addresses": {"type": {"key": "string", "min": 0, "max": 1}}, "port_security": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "dhcpv4_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "dhcpv6_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, 
"Forwarding_Group": { "columns": { "name": {"type": "string"}, "vip": {"type": "string"}, "vmac": {"type": "string"}, "liveness": {"type": "boolean"}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "child_port": {"type": {"key": "string", "min": 1, "max": "unlimited"}}}, "isRoot": false}, "Address_Set": { "columns": { "name": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Port_Group": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Switch_Port", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "acls": {"type": {"key": {"type": "uuid", "refTable": "ACL", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Load_Balancer": { "columns": { "name": {"type": "string"}, "vips": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp", "sctp"]]}, "min": 0, "max": 1}}, "health_check": {"type": { "key": {"type": "uuid", "refTable": "Load_Balancer_Health_Check", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ip_port_mappings": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "selection_fields": { "type": {"key": {"type": "string", "enum": ["set", ["eth_src", "eth_dst", "ip_src", "ip_dst", "tp_src", "tp_dst"]]}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Load_Balancer_Health_Check": { "columns": { "vip": {"type": "string"}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "ACL": { "columns": { "name": {"type": {"key": {"type": "string", "maxLength": 63}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "direction": {"type": {"key": {"type": "string", "enum": ["set", ["from-lport", "to-lport"]]}}}, "match": {"type": "string"}, "action": {"type": {"key": {"type": "string", "enum": ["set", ["allow", "allow-related", "drop", "reject"]]}}}, "log": {"type": "boolean"}, "severity": {"type": {"key": {"type": "string", "enum": ["set", ["alert", "warning", "notice", "info", "debug"]]}, "min": 0, "max": 1}}, "meter": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "QoS": { "columns": { "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "direction": {"type": {"key": {"type": "string", "enum": ["set", ["from-lport", "to-lport"]]}}}, "match": {"type": "string"}, "action": {"type": {"key": {"type": "string", "enum": ["set", ["dscp"]]}, "value": {"type": "integer", "minInteger": 0, "maxInteger": 63}, "min": 0, "max": "unlimited"}}, "bandwidth": {"type": {"key": {"type": "string", "enum": ["set", ["rate", "burst"]]}, "value": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, 
"max": "unlimited"}}}, "isRoot": false}, "Meter": { "columns": { "name": {"type": "string"}, "unit": {"type": {"key": {"type": "string", "enum": ["set", ["kbps", "pktps"]]}}}, "bands": {"type": {"key": {"type": "uuid", "refTable": "Meter_Band", "refType": "strong"}, "min": 1, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Meter_Band": { "columns": { "action": {"type": {"key": {"type": "string", "enum": ["set", ["drop"]]}}}, "rate": {"type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}}}, "burst_size": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4294967295}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Logical_Router": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Router_Port", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "static_routes": {"type": {"key": {"type": "uuid", "refTable": "Logical_Router_Static_Route", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "policies": { "type": {"key": {"type": "uuid", "refTable": "Logical_Router_Policy", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "nat": {"type": {"key": {"type": "uuid", "refTable": "NAT", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "load_balancer": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_Router_Port": { "columns": { "name": {"type": "string"}, "gateway_chassis": { "type": {"key": {"type": "uuid", "refTable": "Gateway_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "networks": {"type": {"key": "string", "min": 1, "max": "unlimited"}}, "mac": {"type": "string"}, "peer": {"type": {"key": "string", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "ipv6_ra_configs": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ipv6_prefix": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "Logical_Router_Static_Route": { "columns": { "ip_prefix": {"type": "string"}, "policy": {"type": {"key": {"type": "string", "enum": ["set", ["src-ip", "dst-ip"]]}, "min": 0, "max": 1}}, "nexthop": {"type": "string"}, "output_port": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Logical_Router_Policy": { "columns": { "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "match": {"type": "string"}, "action": {"type": { "key": {"type": "string", "enum": ["set", ["allow", "drop", "reroute"]]}}}, "nexthop": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": 
"string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "NAT": { "columns": { "external_ip": {"type": "string"}, "external_mac": {"type": {"key": "string", "min": 0, "max": 1}}, "external_port_range": {"type": "string"}, "logical_ip": {"type": "string"}, "logical_port": {"type": {"key": "string", "min": 0, "max": 1}}, "type": {"type": {"key": {"type": "string", "enum": ["set", ["dnat", "snat", "dnat_and_snat" ]]}}}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "DHCP_Options": { "columns": { "cidr": {"type": "string"}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Connection": { "columns": { "target": {"type": "string"}, "max_backoff": {"type": {"key": {"type": "integer", "minInteger": 1000}, "min": 0, "max": 1}}, "inactivity_probe": {"type": {"key": "integer", "min": 0, "max": 1}}, "other_config": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "is_connected": {"type": "boolean", "ephemeral": true}, "status": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}, "ephemeral": true}}, "indexes": [["target"]]}, "DNS": { "columns": { "records": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "SSL": { "columns": { "private_key": {"type": "string"}, "certificate": {"type": "string"}, "ca_cert": {"type": "string"}, "bootstrap_ca_cert": {"type": "boolean"}, "ssl_protocols": {"type": "string"}, "ssl_ciphers": {"type": "string"}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "maxRows": 1}, "Gateway_Chassis": { "columns": { "name": {"type": "string"}, "chassis_name": {"type": "string"}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "HA_Chassis": { "columns": { "chassis_name": {"type": "string"}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "HA_Chassis_Group": { "columns": { "name": {"type": "string"}, "ha_chassis": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}} } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/schemas/ovn-sb.ovsschema0000664000175100017510000006371015033037524031112 0ustar00mylesmyles{ "name": "OVN_Southbound", "version": "20.17.0", "cksum": "669123379 26536", "tables": { "SB_Global": { "columns": { "nb_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 
0, "max": "unlimited"}}, "connections": { "type": {"key": {"type": "uuid", "refTable": "Connection"}, "min": 0, "max": "unlimited"}}, "ssl": { "type": {"key": {"type": "uuid", "refTable": "SSL"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ipsec": {"type": "boolean"}}, "maxRows": 1, "isRoot": true}, "Chassis": { "columns": { "name": {"type": "string"}, "hostname": {"type": "string"}, "encaps": {"type": {"key": {"type": "uuid", "refTable": "Encap"}, "min": 1, "max": "unlimited"}}, "vtep_logical_switches" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "nb_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "other_config": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "transport_zones" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true, "indexes": [["name"]]}, "Chassis_Private": { "columns": { "name": {"type": "string"}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "nb_cfg": {"type": {"key": "integer"}}, "nb_cfg_timestamp": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true, "indexes": [["name"]]}, "Encap": { "columns": { "type": {"type": {"key": { "type": "string", "enum": ["set", ["geneve", "stt", "vxlan"]]}}}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ip": {"type": "string"}, "chassis_name": {"type": "string"}}, "indexes": [["type", "ip"]]}, "Address_Set": { "columns": { "name": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Port_Group": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Logical_Flow": { "columns": { "logical_datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 0, "max": 1}}, "logical_dp_group": {"type": {"key": {"type": "uuid", "refTable": "Logical_DP_Group"}, "min": 0, "max": 1}}, "pipeline": {"type": {"key": {"type": "string", "enum": ["set", ["ingress", "egress"]]}}}, "table_id": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32}}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 65535}}}, "match": {"type": "string"}, "actions": {"type": "string"}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_DP_Group": { "columns": { "datapaths": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Multicast_Group": { "columns": { "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "name": {"type": "string"}, "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 32768, "maxInteger": 65535}}}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Port_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "indexes": [["datapath", "tunnel_key"], ["datapath", "name"]], "isRoot": true}, "Meter": { "columns": { "name": {"type": "string"}, "unit": {"type": {"key": {"type": "string", "enum": ["set", ["kbps", "pktps"]]}}}, "bands": {"type": {"key": {"type": "uuid", "refTable": "Meter_Band", 
"refType": "strong"}, "min": 1, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Meter_Band": { "columns": { "action": {"type": {"key": {"type": "string", "enum": ["set", ["drop"]]}}}, "rate": {"type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}}}, "burst_size": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4294967295}}}}, "isRoot": false}, "Datapath_Binding": { "columns": { "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}, "load_balancers": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["tunnel_key"]], "isRoot": true}, "Port_Binding": { "columns": { "logical_port": {"type": "string"}, "type": {"type": "string"}, "gateway_chassis": { "type": {"key": {"type": "uuid", "refTable": "Gateway_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 32767}}}, "parent_port": {"type": {"key": "string", "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "virtual_parent": {"type": {"key": "string", "min": 0, "max": 1}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "encap": {"type": {"key": {"type": "uuid", "refTable": "Encap", "refType": "weak"}, "min": 0, "max": 1}}, "mac": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "nat_addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["datapath", "tunnel_key"], ["logical_port"]], "isRoot": true}, "MAC_Binding": { "columns": { "logical_port": {"type": "string"}, "ip": {"type": "string"}, "mac": {"type": "string"}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}}, "indexes": [["logical_port", "ip"]], "isRoot": true}, "DHCP_Options": { "columns": { "name": {"type": "string"}, "code": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 254}}}, "type": { "type": {"key": { "type": "string", "enum": ["set", ["bool", "uint8", "uint16", "uint32", "ipv4", "static_routes", "str", "host_id", "domains"]]}}}}, "isRoot": true}, "DHCPv6_Options": { "columns": { "name": {"type": "string"}, "code": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 254}}}, "type": { "type": {"key": { "type": "string", "enum": ["set", ["ipv6", "str", "mac"]]}}}}, "isRoot": true}, "Connection": { "columns": { "target": {"type": "string"}, "max_backoff": {"type": {"key": {"type": "integer", "minInteger": 1000}, "min": 0, "max": 1}}, "inactivity_probe": {"type": {"key": "integer", "min": 0, "max": 1}}, "read_only": {"type": "boolean"}, "role": {"type": "string"}, "other_config": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": 
"unlimited"}}, "is_connected": {"type": "boolean", "ephemeral": true}, "status": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}, "ephemeral": true}}, "indexes": [["target"]]}, "SSL": { "columns": { "private_key": {"type": "string"}, "certificate": {"type": "string"}, "ca_cert": {"type": "string"}, "bootstrap_ca_cert": {"type": "boolean"}, "ssl_protocols": {"type": "string"}, "ssl_ciphers": {"type": "string"}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "maxRows": 1}, "DNS": { "columns": { "records": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "datapaths": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 1, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "RBAC_Role": { "columns": { "name": {"type": "string"}, "permissions": { "type": {"key": {"type": "string"}, "value": {"type": "uuid", "refTable": "RBAC_Permission", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "isRoot": true}, "RBAC_Permission": { "columns": { "table": {"type": "string"}, "authorization": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "insert_delete": {"type": "boolean"}, "update" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Gateway_Chassis": { "columns": { "name": {"type": "string"}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "HA_Chassis": { "columns": { "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "HA_Chassis_Group": { "columns": { "name": {"type": "string"}, "ha_chassis": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ref_chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Controller_Event": { "columns": { "event_type": {"type": {"key": {"type": "string", "enum": ["set", ["empty_lb_backends"]]}}}, "event_info": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "seq_num": {"type": {"key": "integer"}} }, "isRoot": true}, "IP_Multicast": { "columns": { "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "querier": {"type": {"key": "boolean", "min": 0, "max": 1}}, "eth_src": {"type": "string"}, "ip4_src": {"type": "string"}, "ip6_src": {"type": "string"}, "table_size": {"type": {"key": "integer", "min": 0, "max": 1}}, "idle_timeout": {"type": {"key": "integer", "min": 0, "max": 1}}, 
"query_interval": {"type": {"key": "integer", "min": 0, "max": 1}}, "query_max_resp": {"type": {"key": "integer", "min": 0, "max": 1}}, "seq_no": {"type": "integer"}}, "indexes": [["datapath"]], "isRoot": true}, "IGMP_Group": { "columns": { "address": {"type": "string"}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}, "min": 0, "max": 1}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Port_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "indexes": [["address", "datapath", "chassis"]], "isRoot": true}, "Service_Monitor": { "columns": { "ip": {"type": "string"}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp"]]}, "min": 0, "max": 1}}, "port": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "logical_port": {"type": "string"}, "src_mac": {"type": "string"}, "src_ip": {"type": "string"}, "status": { "type": {"key": {"type": "string", "enum": ["set", ["online", "offline", "error"]]}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["logical_port", "ip", "port", "protocol"]], "isRoot": true}, "Load_Balancer": { "columns": { "name": {"type": "string"}, "vips": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp", "sctp"]]}, "min": 0, "max": 1}}, "datapaths": { "type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "BFD": { "columns": { "src_port": {"type": {"key": {"type": "integer", "minInteger": 49152, "maxInteger": 65535}}}, "disc": {"type": {"key": {"type": "integer"}}}, "logical_port": {"type": "string"}, "dst_ip": {"type": "string"}, "min_tx": {"type": {"key": {"type": "integer"}}}, "min_rx": {"type": {"key": {"type": "integer"}}}, "detect_mult": {"type": {"key": {"type": "integer"}}}, "status": { "type": {"key": {"type": "string", "enum": ["set", ["down", "init", "up", "admin_down"]]}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["logical_port", "dst_ip", "src_port", "disc"]], "isRoot": true}, "FDB": { "columns": { "mac": {"type": "string"}, "dp_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}, "port_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}}, "indexes": [["mac", "dp_key"]], "isRoot": true} } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/test_agent.py0000664000175100017510000000220715033037524027052 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from ovn_octavia_provider import agent as ovn_agent from ovn_octavia_provider.tests.unit import base as ovn_base class TestOvnProviderAgent(ovn_base.TestOvnOctaviaBase): def test_exit(self): mock_exit_event = mock.MagicMock() mock_exit_event.is_set.side_effect = [False, False, False, False, True] ovn_agent.OvnProviderAgent(mock_exit_event) self.assertEqual(1, mock_exit_event.wait.call_count) self.assertEqual(3, self.mock_ovn_nb_idl.call_count) self.assertEqual(1, self.mock_ovn_sb_idl.call_count) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/test_cmd.py0000664000175100017510000000440515033037524026521 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import sys from unittest import mock from oslo_config import cfg from oslo_log import log from ovn_octavia_provider.cmd import octavia_ovn_db_sync_util from ovn_octavia_provider import driver from ovn_octavia_provider.tests.unit import base as ovn_base class TestCMD(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() mock.patch.object(log, 'register_options').start() self.m_cfg = mock.patch.object( cfg.ConfigOpts, '__call__').start() @mock.patch.object(driver.OvnProviderDriver, 'do_sync') def test_octavia_ovn_db_sync_util(self, m_sync): octavia_ovn_db_sync_util.main() m_sync.assert_called_once_with(provider='ovn') @mock.patch.object(cfg.CONF, 'set_override') @mock.patch.object(driver.OvnProviderDriver, 'do_sync') def test_octavia_ovn_db_sync_util_with_debug(self, m_sync, m_cfg_or): return_value = ['octavia-ovn-db-sync-util', '--debug'] return_value_no_debug = ['octavia-ovn-db-sync-util'] with mock.patch.object(sys, 'argv', return_value): octavia_ovn_db_sync_util.main() with mock.patch.object(sys, 'argv', return_value_no_debug): octavia_ovn_db_sync_util.main() m_cfg_or.assert_has_calls([mock.call('debug', True), mock.call('debug', False)]) @mock.patch.object(octavia_ovn_db_sync_util, 'LOG') def test_octavia_ovn_db_sync_util_config_error(self, m_log): self.m_cfg.side_effect = [TypeError()] self.assertRaises(TypeError, octavia_ovn_db_sync_util.main) msg = ("Error parsing the configuration values. 
Please verify.") m_log.error.assert_called_once_with(msg) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/test_driver.py0000664000175100017510000033407415033037524027261 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy from unittest import mock from octavia_lib.api.drivers import data_models from octavia_lib.api.drivers import driver_lib as o_driver_lib from octavia_lib.api.drivers import exceptions from octavia_lib.common import constants import openstack from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import idlutils from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider.common import exceptions as ovn_exc from ovn_octavia_provider import driver as ovn_driver from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.tests.unit import base as ovn_base from ovn_octavia_provider.tests.unit import fakes class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() self.driver = ovn_driver.OvnProviderDriver() add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper, 'add_request') self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.member_line_additional_vips = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb = mock.MagicMock() self.ovn_lb.name = 'foo_ovn_lb' self.fake_vip = '10.22.33.4' self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: self.fake_vip, 'pool_%s' % self.pool_id: self.member_line, 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} self.ovn_lb_addi_vips = mock.MagicMock() self.ovn_lb_addi_vips.name = 'foo_ovn_lb_addi_vips' self.ovn_lb_addi_vips.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: '2001:db8:0:1::203', 'pool_%s' % self.pool_id: ','.join([ self.member_line, self.member_line_additional_vips]), 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} self.mock_add_request = add_req_thread.start() self.project_id = uuidutils.generate_uuid() self.fail_member = data_models.Member( address='198.51.100.4', admin_state_up=True, member_id=self.member_id, monitor_address="100.200.200.100", monitor_port=66, name='Amazin', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.ref_member = data_models.Member( address='198.52.100.4', admin_state_up=True, member_id=self.member_id, monitor_address=data_models.Unset, monitor_port=data_models.Unset, name='Amazing', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.update_member = data_models.Member( address='198.53.100.4', admin_state_up=False, member_id=self.member_id, monitor_address=data_models.Unset, monitor_port=data_models.Unset, name='Amazin', pool_id=self.pool_id, 
protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.ref_update_pool = data_models.Pool( admin_state_up=False, description='pool', name='Peter', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], pool_id=self.pool_id, protocol='TCP', session_persistence={'type': 'SOURCE_IP'}) self.ref_pool = data_models.Pool( admin_state_up=True, description='pool', name='Peter', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], pool_id=self.pool_id, protocol='TCP', session_persistence={'type': 'SOURCE_IP'}) self.ref_http_pool = data_models.Pool( admin_state_up=True, description='pool', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], name='Groot', pool_id=self.pool_id, protocol='HTTP', session_persistence={'type': 'fix'}) self.ref_lc_pool = data_models.Pool( admin_state_up=True, description='pool', lb_algorithm=constants.LB_ALGORITHM_LEAST_CONNECTIONS, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], name='Groot', pool_id=self.pool_id, protocol='HTTP', session_persistence={'type': 'fix'}) self.ref_listener = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='TCP', protocol_port=42) self.ref_listener_udp = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='UDP', protocol_port=42) self.ref_listener_sctp = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='SCTP', protocol_port=42) self.fail_listener = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='listener', protocol='http', protocol_port=42) self.ref_lb_fully_populated = data_models.LoadBalancer( admin_state_up=False, listeners=[self.ref_listener], pools=[self.ref_pool], loadbalancer_id=self.loadbalancer_id, name='favorite_lb0', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, vip_subnet_id=self.vip_subnet_id) self.ref_lb0 = data_models.LoadBalancer( admin_state_up=False, listeners=[self.ref_listener], loadbalancer_id=self.loadbalancer_id, name='favorite_lb0', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id) self.ref_lb1 = data_models.LoadBalancer( admin_state_up=True, listeners=[self.ref_listener], loadbalancer_id=self.loadbalancer_id, name='favorite_lb1', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id) self.ref_lb2 = data_models.LoadBalancer( admin_state_up=False, listeners=[self.ref_listener], loadbalancer_id=self.loadbalancer_id, name='additional_vips_lb2', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, additional_vips=self.additional_vips) self.fail_health_monitor = data_models.HealthMonitor( 
admin_state_up=True, name='UnHealthy', pool_id=self.pool_id, healthmonitor_id=self.healthmonitor_id, type="not_valid", delay=1, timeout=2, max_retries_down=3, max_retries=4) self.ref_health_monitor = data_models.HealthMonitor( admin_state_up=True, name='Healthy', pool_id=self.pool_id, healthmonitor_id=self.healthmonitor_id, type=constants.HEALTH_MONITOR_TCP, delay=6, timeout=7, max_retries_down=5, max_retries=3) self.ref_update_health_monitor = data_models.HealthMonitor( admin_state_up=True, name='ReHealthy', healthmonitor_id=self.healthmonitor_id, delay=16, timeout=17, max_retries_down=15, max_retries=13) self.ref_pool_with_hm = data_models.Pool( admin_state_up=True, description='pool', name='Peter', lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, healthmonitor=self.ref_health_monitor, members=[self.ref_member], pool_id=self.pool_id, protocol='TCP', session_persistence={'type': 'SOURCE_IP'}) self.ref_lb_fully_sync_populated = data_models.LoadBalancer( admin_state_up=False, listeners=[self.ref_listener], pools=[self.ref_pool_with_hm], loadbalancer_id=self.loadbalancer_id, name='favorite_lb0', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id) mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lbs', side_effect=lambda x, protocol=None: self.ovn_lb if protocol else [self.ovn_lb]).start() self.mock_find_lb_pool_key = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lb_with_pool_key', return_value=self.ovn_lb).start() self.mock_find_ovn_lbs_with_retry = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry', return_value=self.ovn_lb).start() self.mock_get_subnet_from_pool = mock.patch.object( ovn_helper.OvnProviderHelper, '_get_subnet_from_pool', return_value=(None, None)).start() self.mock_check_ip_in_subnet = mock.patch.object( ovn_helper.OvnProviderHelper, '_check_ip_in_subnet', return_value=True).start() self.fake_fip = '1.2.3.4' self.lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_PORT_FIP_EXT_ID_KEY: self.fake_fip, ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'ovn-lb-vip-d571f37e-5708-48a1-8ba8-fc5d9ce36eac', }, 'type': 'localnet', 'options': {}, }) self.lsp_addi = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_PORT_FIP_EXT_ID_KEY: self.fake_fip, ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'ovn-lb-vip-additional--\ 19d4c20e-05ff-4534-9d5c-214f2979db40', }, 'type': 'localnet', 'options': {}, }) self.mock_get_lsp = mock.patch.object( ovn_helper.OvnProviderHelper, 'get_lsp', return_value=self.lsp).start() self.mock_find_ovn_lbs_with_retry = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry', return_value=[self.ovn_lb]).start() def test_check_for_allowed_cidrs_exception(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver._check_for_allowed_cidrs, '10.0.0.1') def test__ip_version_differs(self): self.assertFalse(self.driver._ip_version_differs(self.ref_member)) self.ref_member.address = 'fc00::1' self.assertTrue(self.driver._ip_version_differs(self.ref_member)) def test__ip_version_differs_lb_additional_vips(self): self.mock_find_ovn_lb_by_pool_id = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id').start() self.mock_find_ovn_lb_by_pool_id.return_value = (_, self.ovn_lb_addi_vips) self.ref_member.address = 'fc00::1' self.assertFalse(self.driver._ip_version_differs(self.ref_member)) def 
test__ip_version_differs_lb_not_found(self): self.mock_find_ovn_lb_by_pool_id = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id').start() self.mock_find_ovn_lb_by_pool_id.return_value = (_, None) self.assertFalse(self.driver._ip_version_differs(self.ref_member)) def test__ip_version_differs_pool_disabled(self): self.mock_find_lb_pool_key.side_effect = [None, self.ovn_lb] self.driver._ip_version_differs(self.ref_member) self.mock_find_lb_pool_key.assert_has_calls([ mock.call('pool_%s' % self.pool_id), mock.call('pool_%s:D' % self.pool_id)]) def _test_member_create(self, member): info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': self.ref_member.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} self.driver.member_create(member) expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.mock_add_request.assert_has_calls(expected) def test_member_create(self): self._test_member_create(self.ref_member) def test_member_create_failure(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.fail_member) def test_member_create_different_ip_version(self): self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_different_ip_version_lb_disable(self): self.driver._ovn_helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.driver._ovn_helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) def test_member_create_no_subnet_provided(self): self.ref_member.subnet_id = data_models.UnsetType() self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.ref_member.subnet_id = None self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_no_subnet_provided_get_from_pool(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') self.driver._ovn_helper._check_ip_in_subnet.return_value = False self.ref_member.subnet_id = data_models.UnsetType() self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.ref_member.subnet_id = None self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_no_subnet_provided_get_from_pool_failed(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') member = copy.copy(self.ref_member) member.subnet_id = data_models.UnsetType() self._test_member_create(member) member.subnet_id = None self._test_member_create(member) def test__check_monitor_options_member_no_monitor_data(self): self.ref_member.monitor_address = None 
self.assertFalse(self.driver._check_monitor_options(self.ref_member)) def test_member_create_monitor_opts(self): self.ref_member.monitor_address = '172.20.20.1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) self.ref_member.monitor_port = '80' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_create, self.ref_member) def test_member_create_no_set_admin_state_up(self): self.ref_member.admin_state_up = data_models.UnsetType() info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info} expected_dict_dvr = {'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': mock.ANY} expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.driver.member_create(self.ref_member) self.mock_add_request.assert_has_calls(expected) def test_member_update(self): info = {'id': self.update_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'admin_state_up': self.update_member.admin_state_up, 'old_admin_state_up': self.ref_member.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': info} self.driver.member_update(self.ref_member, self.update_member) self.mock_add_request.assert_called_once_with(expected_dict) def test_member_update_missing_subnet_id(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') info = {'id': self.update_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'admin_state_up': self.update_member.admin_state_up, 'old_admin_state_up': self.ref_member.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': info} member = copy.copy(self.ref_member) member.subnet_id = data_models.UnsetType() self.driver.member_update(member, self.update_member) self.mock_add_request.assert_called_once_with(expected_dict) def test_member_update_unset_admin_state_up(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') self.update_member.admin_state_up = data_models.UnsetType() info = {'id': self.update_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'old_admin_state_up': self.ref_member.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': info} member = copy.copy(self.ref_member) member.subnet_id = data_models.UnsetType() self.driver.member_update(member, self.update_member) self.mock_add_request.assert_called_once_with(expected_dict) @mock.patch.object(ovn_driver.OvnProviderDriver, '_ip_version_differs') def test_member_update_no_ip_addr(self, mock_ip_differs): self.update_member.address = None self.driver.member_update(self.ref_member, self.update_member) mock_ip_differs.assert_not_called() def test_member_batch_update(self): self.driver.member_batch_update(self.pool_id, [self.ref_member, self.update_member]) self.assertEqual(self.mock_add_request.call_count, 4) def test_member_batch_update_member_delete(self): info_md = { 'id': self.ref_member.member_id, 'address': mock.ANY, 'protocol_port': mock.ANY, 'pool_id': 
self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id} expected_dict_md = { 'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': info_md} expected = [ mock.call(expected_dict_md)] self.driver.member_batch_update(self.pool_id, []) self.assertEqual(self.mock_add_request.call_count, 2) self.mock_add_request.assert_has_calls(expected) def test_member_batch_update_no_members(self): pool_key = 'pool_%s' % self.pool_id ovn_lb = copy.copy(self.ovn_lb) ovn_lb.external_ids[pool_key] = [] self.mock_find_lb_pool_key.return_value = ovn_lb self.driver.member_batch_update(self.pool_id, [self.ref_member, self.update_member]) self.assertEqual(self.mock_add_request.call_count, 2) def test_member_batch_update_skipped_monitor(self): self.ref_member.monitor_address = '10.11.1.1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_batch_update_skipped_mixed_ip(self): self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_batch_update_unset_admin_state_up(self): self.ref_member.admin_state_up = data_models.UnsetType() self.driver.member_batch_update(self.pool_id, [self.ref_member]) self.assertEqual(self.mock_add_request.call_count, 3) def test_member_batch_update_toggle_admin_state_up(self): info_mu = { 'id': self.ref_member.member_id, 'address': self.member_address, 'protocol_port': self.member_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': False} expected_dict_mu = { 'type': ovn_const.REQ_TYPE_MEMBER_UPDATE, 'info': info_mu} expected = [ mock.call(expected_dict_mu)] self.ref_member.admin_state_up = False self.ref_member.address = self.member_address self.ref_member.protocol_port = self.member_port self.driver.member_batch_update(self.pool_id, [self.ref_member]) self.assertEqual(self.mock_add_request.call_count, 1) self.mock_add_request.assert_has_calls(expected) def test_member_batch_update_missing_subnet_id(self): self.ref_member.subnet_id = None self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_batch_update_missing_subnet_id_get_from_pool(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') self.ref_member.subnet_id = None self.driver.member_batch_update(self.pool_id, [self.ref_member]) def test_member_batch_update_missing_subnet_id_get_from_pool_fail(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') self.driver._ovn_helper._check_ip_in_subnet.return_value = False self.ref_member.subnet_id = None self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_batch_update, self.pool_id, [self.ref_member]) def test_member_update_failure(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_update, self.ref_member, self.fail_member) def test_member_update_different_ip_version(self): self.ref_member.address = 'fc00::1' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_update, self.ref_member, self.ref_member) def test_member_delete(self): info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 
'info': info} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} self.driver.member_delete(self.ref_member) expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.mock_add_request.assert_has_calls(expected) def test_member_delete_missing_subnet_id(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') info = {'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id} expected_dict = {'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': info} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_DELETED} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} member = copy.copy(self.ref_member) member.subnet_id = data_models.UnsetType() self.driver.member_delete(member) expected = [ mock.call(expected_dict), mock.call(expected_dict_dvr)] self.mock_add_request.assert_has_calls(expected) def test_member_delete_missing_subnet_id_differs_from_lb_vip(self): self.driver._ovn_helper._get_subnet_from_pool.return_value = ( self.ref_member.subnet_id, '198.52.100.0/24') self.driver._ovn_helper._check_ip_in_subnet.return_value = False self.ref_member.subnet_id = data_models.UnsetType() self.assertRaises(exceptions.UnsupportedOptionError, self.driver.member_delete, self.ref_member) def test_listener_create(self): info = {'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_create_unset_admin_state_up(self): self.ref_listener.admin_state_up = data_models.UnsetType() info = {'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': True, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_create_unsupported_protocol(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.listener_create, self.fail_listener) def test_listener_create_multiple_protocols(self): self.ovn_lb.protocol = ['TCP'] info = {'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) 
self.mock_add_request.assert_called_once_with(expected_dict) self.ovn_lb.protocol = ['UDP'] info['protocol'] = 'UDP' expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) self.ovn_lb.protocol = ['SCTP'] info['protocol'] = 'SCTP' expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info} self.driver.listener_create(self.ref_listener) def test_listener_update(self): info = {'id': self.ref_listener.listener_id, 'protocol_port': self.ref_listener.protocol_port, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} if self.ref_listener.default_pool_id: info['default_pool_id'] = self.ref_listener.default_pool_id expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_UPDATE, 'info': info} self.driver.listener_update(self.ref_listener, self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_update_unset_admin_state_up(self): self.ref_listener.admin_state_up = data_models.UnsetType() info = {'id': self.ref_listener.listener_id, 'protocol_port': self.ref_listener.protocol_port, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_listener.loadbalancer_id} if self.ref_listener.default_pool_id: info['default_pool_id'] = self.ref_listener.default_pool_id expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_UPDATE, 'info': info} self.driver.listener_update(self.ref_listener, self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_update_unset_default_pool_id(self): self.ref_listener.default_pool_id = data_models.UnsetType() info = {'id': self.ref_listener.listener_id, 'protocol_port': self.ref_listener.protocol_port, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_UPDATE, 'info': info} self.driver.listener_update(self.ref_listener, self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_listener_delete(self): info = {'id': self.ref_listener.listener_id, 'protocol_port': self.ref_listener.protocol_port, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_listener.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LISTENER_DELETE, 'info': info} self.driver.listener_delete(self.ref_listener) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_fully_populate_create(self): info = { 'id': self.ref_lb_fully_populated.loadbalancer_id, 'vip_address': self.ref_lb_fully_populated.vip_address, 'vip_network_id': self.ref_lb_fully_populated.vip_network_id, 'admin_state_up': self.ref_lb_fully_populated.admin_state_up} info_listener = { 'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} info_pool = { 'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'listener_id': self.ref_pool.listener_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': self.ref_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} info_member = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 
'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': self.ref_member.admin_state_up} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} expected_lb_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} expected_listener_dict = { 'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info_listener} expected_pool_dict = { 'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info_pool} expected_member_dict = { 'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info_member} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} calls = [mock.call(expected_lb_dict), mock.call(expected_listener_dict), mock.call(expected_pool_dict), mock.call(expected_member_dict), mock.call(expected_dict_dvr)] self.driver.loadbalancer_create(self.ref_lb_fully_populated) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_create(self): info = {'id': self.ref_lb0.loadbalancer_id, 'vip_address': self.ref_lb0.vip_address, 'vip_network_id': self.ref_lb0.vip_network_id, 'admin_state_up': self.ref_lb0.admin_state_up} expected_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} calls = [mock.call(expected_dict)] self.driver.loadbalancer_create(self.ref_lb0) self.mock_add_request.assert_has_calls(calls) @mock.patch.object(ovn_helper.OvnProviderHelper, 'vip_port_update_handler') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_basic(self, net_cli, m_vpu): net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address=self.fake_fip)] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_vpu.assert_called_once_with( vip_lp=self.lsp, fip=self.fake_fip, action=ovn_const.REQ_INFO_ACTION_SYNC) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync(self, net_cli): net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address=self.fake_fip)] fake_port = fakes.FakePort.create_one_port() fake_port['fixed_ips'][0]['ip_address'] = self.fake_vip net_cli.return_value.get_port.return_value = fake_port self.ref_lb_fully_sync_populated.vip_port_id = 'foo' with mock.patch.object(ovn_helper.OvnProviderHelper, 'handle_vip_fip') as mock_handle_vip_fip: info = { 'ovn_lb': self.ovn_lb, 'vip_fip': self.fake_fip, 'vip_related': [self.fake_vip], 'additional_vip_fip': False, 'action': ovn_const.REQ_INFO_ACTION_SYNC } self.driver._fip_sync(self.ref_lb_fully_sync_populated) mock_handle_vip_fip.assert_called_once_with(info) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_ovn_lb_not_found(self, net_cli): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address=self.fake_fip)] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' with mock.patch.object(ovn_helper, 'LOG') as m_l: self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_l.debug.assert_called() self.mock_add_request.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_addi(self, net_cli): net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address=self.fake_fip)] fake_port = fakes.FakePort.create_one_port() self.mock_get_lsp.return_value = self.lsp_addi 
fake_port['fixed_ips'][0]['ip_address'] = self.fake_vip net_cli.return_value.get_port.return_value = fake_port self.ref_lb_fully_sync_populated.vip_port_id = 'foo' with mock.patch.object(ovn_helper.OvnProviderHelper, 'handle_vip_fip') as mock_handle_vip_fip: info = { 'ovn_lb': self.ovn_lb, 'vip_fip': self.fake_fip, 'vip_related': [self.fake_vip], 'additional_vip_fip': True, 'action': ovn_const.REQ_INFO_ACTION_SYNC } self.driver._fip_sync(self.ref_lb_fully_sync_populated) mock_handle_vip_fip.assert_called_once_with(info) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_os_err(self, net_cli): net_cli.return_value.ips.side_effect = [ openstack.exceptions.HttpException] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' calls = [ mock.call('Floating IP not found for loadbalancer ' f'{self.ref_lb_fully_sync_populated.loadbalancer_id}'), mock.call('Floating IP not consistent between Logic Switch Port ' f'and Neutron. Found FIP {self.fake_fip} configured in ' f'LSP {self.lsp.name}, but no FIP configured from ' 'Neutron. Please run command `neutron-ovn-db-sync-util` ' 'first to sync OVN DB with Neutron DB.')] with mock.patch.object(ovn_driver, 'LOG') as m_l: self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_l.warn.assert_has_calls(calls) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_lsp_mismatch(self, net_cli): net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address='1.2.3.5')] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' msg = ('Floating IP not consistent between Logic Switch Port and ' f'Neutron. Found FIP {self.fake_fip} in LSP {self.lsp.name}, ' 'but we have 1.2.3.5 from Neutron. Skip sync FIP for ' 'loadbalancer ' f'{self.ref_lb_fully_sync_populated.loadbalancer_id}. ' 'Please run command `neutron-ovn-db-sync-util` first to ' 'sync OVN DB with Neutron DB.') with mock.patch.object(ovn_driver, 'LOG') as m_l: self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_l.warn.assert_called_once_with(msg) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_lsp_not_found(self, net_cli): net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address='1.2.3.4')] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' self.mock_get_lsp.return_value = None msg = ( 'Logic Switch Port not found for port foo. Skip sync FIP for ' 'loadbalancer ' f'{self.ref_lb_fully_sync_populated.loadbalancer_id}. Please ' 'run command `neutron-ovn-db-sync-util` first to sync OVN DB ' 'with Neutron DB.') with mock.patch.object(ovn_driver, 'LOG') as m_l: self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_l.warn.assert_called_once_with(msg) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_no_neutron_fip(self, net_cli): net_cli.return_value.ips.return_value = [] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' calls = [ mock.call('Floating IP not found for loadbalancer ' f'{self.ref_lb_fully_sync_populated.loadbalancer_id}'), mock.call('Floating IP not consistent between Logic Switch Port ' f'and Neutron. Found FIP {self.fake_fip} configured in ' f'LSP {self.lsp.name}, but no FIP configured from ' 'Neutron. 
Please run command `neutron-ovn-db-sync-util` ' 'first to sync OVN DB with Neutron DB.')] with mock.patch.object(ovn_driver, 'LOG') as m_l: self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_l.warn.assert_has_calls(calls) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_no_neutron_fip_no_lsp(self, net_cli): net_cli.return_value.ips.return_value = [] self.ref_lb_fully_sync_populated.vip_port_id = 'foo' self.mock_get_lsp.return_value = None self.driver._fip_sync(self.ref_lb_fully_sync_populated) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_fip_sync_no_port_id(self, net_cli): net_cli.return_value.ips.return_value = [ mock.Mock(floating_ip_address='1.2.3.4')] msg = ('VIP Port or Network not set for loadbalancer ' f'{self.ref_lb_fully_sync_populated.loadbalancer_id}, ' 'skip FIP sync.') with mock.patch.object(ovn_driver, 'LOG') as m_l: self.driver._fip_sync(self.ref_lb_fully_sync_populated) m_l.debug.assert_called_once_with(msg) def test_loadbalancer_create_additional_vips(self): info = {'id': self.ref_lb2.loadbalancer_id, 'vip_address': self.ref_lb2.vip_address, 'vip_network_id': self.ref_lb2.vip_network_id, 'additional_vips': self.ref_lb2.additional_vips, 'admin_state_up': self.ref_lb2.admin_state_up} expected_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} calls = [mock.call(expected_dict)] self.driver.loadbalancer_create(self.ref_lb2) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_create_member_without_subnet_id(self): self.ref_member.subnet_id = data_models.UnsetType() info = { 'id': self.ref_lb_fully_populated.loadbalancer_id, 'vip_address': self.ref_lb_fully_populated.vip_address, 'vip_network_id': self.ref_lb_fully_populated.vip_network_id, 'admin_state_up': self.ref_lb_fully_populated.admin_state_up} info_listener = { 'id': self.ref_listener.listener_id, 'protocol': self.ref_listener.protocol, 'protocol_port': self.ref_listener.protocol_port, 'default_pool_id': self.ref_listener.default_pool_id, 'admin_state_up': self.ref_listener.admin_state_up, 'loadbalancer_id': self.ref_listener.loadbalancer_id} info_pool = { 'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'listener_id': self.ref_pool.listener_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': self.ref_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} info_member = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_lb_fully_populated.vip_subnet_id, 'admin_state_up': self.ref_member.admin_state_up} info_dvr = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_lb_fully_populated.vip_subnet_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} expected_lb_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} expected_listener_dict = { 'type': ovn_const.REQ_TYPE_LISTENER_CREATE, 'info': info_listener} expected_pool_dict = { 'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info_pool} expected_member_dict = { 'type': ovn_const.REQ_TYPE_MEMBER_CREATE, 'info': info_member} expected_dict_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': info_dvr} calls = [mock.call(expected_lb_dict), mock.call(expected_listener_dict), mock.call(expected_pool_dict),
mock.call(expected_member_dict), mock.call(expected_dict_dvr)] self.driver.loadbalancer_create(self.ref_lb_fully_populated) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_create_unset_listeners(self): self.ref_lb0.listeners = data_models.UnsetType() info = {'id': self.ref_lb0.loadbalancer_id, 'vip_address': self.ref_lb0.vip_address, 'vip_network_id': self.ref_lb0.vip_network_id, 'admin_state_up': False} expected_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} calls = [mock.call(expected_dict)] self.driver.loadbalancer_create(self.ref_lb0) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_create_unset_admin_state_up(self): self.ref_lb0.admin_state_up = data_models.UnsetType() info = {'id': self.ref_lb0.loadbalancer_id, 'vip_address': self.ref_lb0.vip_address, 'vip_network_id': self.ref_lb0.vip_network_id, 'admin_state_up': True} expected_dict = { 'type': ovn_const.REQ_TYPE_LB_CREATE, 'info': info} calls = [mock.call(expected_dict)] self.driver.loadbalancer_create(self.ref_lb0) self.mock_add_request.assert_has_calls(calls) def test_loadbalancer_update(self): info = {'id': self.ref_lb1.loadbalancer_id, 'admin_state_up': self.ref_lb1.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_LB_UPDATE, 'info': info} self.driver.loadbalancer_update(self.ref_lb0, self.ref_lb1) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_update_unset_admin_state_up(self): self.ref_lb1.admin_state_up = data_models.UnsetType() info = {'id': self.ref_lb1.loadbalancer_id} expected_dict = {'type': ovn_const.REQ_TYPE_LB_UPDATE, 'info': info} self.driver.loadbalancer_update(self.ref_lb0, self.ref_lb1) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_delete(self): info = {'id': self.ref_lb0.loadbalancer_id, 'cascade': False} expected_dict = {'type': ovn_const.REQ_TYPE_LB_DELETE, 'info': info} self.driver.loadbalancer_delete(self.ref_lb1) self.mock_add_request.assert_called_once_with(expected_dict) def test_loadbalancer_failover(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.loadbalancer_failover, self.ref_lb0.loadbalancer_id) def test_pool_create_unsupported_protocol(self): self.ref_pool.protocol = 'HTTP' self.assertRaises(exceptions.UnsupportedOptionError, self.driver.pool_create, self.ref_pool) def test_pool_create_leastcount_algo(self): self.ref_pool.lb_algorithm = constants.LB_ALGORITHM_LEAST_CONNECTIONS self.assertRaises(exceptions.UnsupportedOptionError, self.driver.pool_create, self.ref_pool) def test_pool_create(self): info = {'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'listener_id': self.ref_pool.listener_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': self.ref_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info} self.driver.pool_create(self.ref_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_create_with_health_monitor(self): self.ref_pool.healthmonitor = self.ref_health_monitor info = {'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'listener_id': self.ref_pool.listener_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': self.ref_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} info_hm = {'id': self.ref_health_monitor.healthmonitor_id, 
'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': self.ref_health_monitor.admin_state_up} expected_pool_dict = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info} expected_hm_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': info_hm} calls = [mock.call(expected_pool_dict), mock.call(expected_hm_dict)] self.driver.pool_create(self.ref_pool) self.mock_add_request.assert_has_calls(calls) def test_pool_create_unset_admin_state_up(self): self.ref_pool.admin_state_up = data_models.UnsetType() info = {'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'listener_id': self.ref_pool.listener_id, 'admin_state_up': True, 'session_persistence': {'type': 'SOURCE_IP'}} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info} self.driver.pool_create(self.ref_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_create_unset_session_persistence(self): self.ref_pool.session_persistence = data_models.UnsetType() info = {'id': self.ref_pool.pool_id, 'loadbalancer_id': self.ref_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'listener_id': self.ref_pool.listener_id, 'admin_state_up': self.ref_pool.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_CREATE, 'info': info} self.driver.pool_create(self.ref_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_delete(self): # Pretend we don't have members self.ref_pool.members = [] info = {'id': self.ref_pool.pool_id, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_pool.loadbalancer_id} expected = {'type': ovn_const.REQ_TYPE_POOL_DELETE, 'info': info} self.driver.pool_delete(self.ref_pool) self.mock_add_request.assert_called_once_with(expected) def test_pool_delete_with_members_and_hm(self): self.ref_pool.healthmonitor = self.ref_health_monitor info = {'id': self.ref_pool.pool_id, 'protocol': self.ref_pool.protocol, 'loadbalancer_id': self.ref_pool.loadbalancer_id} expected = {'type': ovn_const.REQ_TYPE_POOL_DELETE, 'info': info} info_hm = {'id': self.ref_pool.healthmonitor.healthmonitor_id, 'pool_id': self.ref_pool.pool_id} info_member = {'id': self.ref_member.member_id, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'protocol_port': self.ref_member.protocol_port, 'address': self.ref_member.address} expected_hm = { 'type': ovn_const.REQ_TYPE_HM_DELETE, 'info': info_hm} expected_members = { 'type': ovn_const.REQ_TYPE_MEMBER_DELETE, 'info': info_member} expected_members_dvr = { 'type': ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR, 'info': mock.ANY} calls = [mock.call(expected_hm), mock.call(expected_members), mock.call(expected_members_dvr), mock.call(expected)] self.driver.pool_delete(self.ref_pool) self.mock_add_request.assert_has_calls(calls) def test_pool_update(self): info = {'id': self.ref_update_pool.pool_id, 'loadbalancer_id': self.ref_update_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_update_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': info} self.driver.pool_update(self.ref_pool,
self.ref_update_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_update_unset_new_protocol(self): self.ref_update_pool.protocol = data_models.UnsetType() info = {'id': self.ref_update_pool.pool_id, 'loadbalancer_id': self.ref_update_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_update_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': info} self.driver.pool_update(self.ref_pool, self.ref_update_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_update_unset_new_lb_algorithm(self): self.ref_update_pool.lb_algorithm = data_models.UnsetType() info = {'id': self.ref_update_pool.pool_id, 'loadbalancer_id': self.ref_update_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_update_pool.admin_state_up, 'session_persistence': {'type': 'SOURCE_IP'}} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': info} self.driver.pool_update(self.ref_pool, self.ref_update_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_update_unset_new_admin_state_up(self): self.ref_update_pool.admin_state_up = data_models.UnsetType() info = {'id': self.ref_update_pool.pool_id, 'loadbalancer_id': self.ref_update_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'session_persistence': {'type': 'SOURCE_IP'}} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': info} self.driver.pool_update(self.ref_pool, self.ref_update_pool) self.mock_add_request.assert_called_once_with(expected_dict) def test_pool_update_unset_new_session_timeout(self): self.ref_update_pool.session_persistence = data_models.UnsetType() info = {'id': self.ref_update_pool.pool_id, 'loadbalancer_id': self.ref_update_pool.loadbalancer_id, 'protocol': self.ref_pool.protocol, 'admin_state_up': self.ref_update_pool.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_POOL_UPDATE, 'info': info} self.driver.pool_update(self.ref_pool, self.ref_update_pool) self.mock_add_request.assert_called_once_with(expected_dict) @mock.patch.object(ovn_driver.OvnProviderDriver, '_check_member_monitor_options') @mock.patch.object(ovn_driver.OvnProviderDriver, '_ip_version_differs') def test_get_member_request_info_create_valid( self, mock_ip_version_differs, mock_check_monitor): mock_ip_version_differs.return_value = False result = self.driver._get_member_request_info( self.ref_member, create=True) mock_check_monitor.assert_called_once_with(self.ref_member) mock_ip_version_differs.assert_called_once_with(self.ref_member) expected_result = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': self.ref_member.admin_state_up, } self.assertEqual(result, expected_result) @mock.patch.object(ovn_driver.OvnProviderDriver, '_validate_hm_support') def test_get_healthmonitor_request_info_with_admin_state_up( self, mock_validate): result = self.driver._get_healthmonitor_request_info( self.ref_health_monitor) expected = { 'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 
'admin_state_up': self.ref_health_monitor.admin_state_up } self.assertEqual(result, expected) mock_validate.assert_called_once_with(self.ref_health_monitor) @mock.patch.object(ovn_driver.OvnProviderDriver, '_validate_hm_support') def test_get_healthmonitor_request_info_with_unset_admin_state_up( self, mock_validate): self.ref_health_monitor.admin_state_up = data_models.UnsetType() result = self.driver._get_healthmonitor_request_info( self.ref_health_monitor) expected = { 'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': True } self.assertEqual(result, expected) mock_validate.assert_called_once_with(self.ref_health_monitor) @mock.patch.object(ovn_driver.OvnProviderDriver, '_validate_hm_support') def test_get_healthmonitor_request_info_with_admin_state_down( self, mock_validate): self.ref_health_monitor.admin_state_up = False result = self.driver._get_healthmonitor_request_info( self.ref_health_monitor) expected = { 'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': self.ref_health_monitor.admin_state_up } self.assertEqual(result, expected) mock_validate.assert_called_once_with(self.ref_health_monitor) @mock.patch.object(ovn_driver.OvnProviderDriver, '_check_member_monitor_options') @mock.patch.object(ovn_driver.OvnProviderDriver, '_ip_version_differs') def test_ip_version_differs_raises_exception( self, mock_ip_version_differs, mock_check_monitor): mock_ip_version_differs.return_value = True self.assertRaises(ovn_exc.IPVersionsMixingNotSupportedError, self.driver._get_member_request_info, self.ref_member, create=True) mock_check_monitor.assert_called_once_with(self.ref_member) mock_ip_version_differs.assert_called_once_with(self.ref_member) def test_get_subnet_from_pool(self): self.ref_member.subnet_id = None self.driver._ovn_helper._get_subnet_from_pool.return_value = ( 'subnet_2', '10.0.0.0/24') self.driver._ovn_helper._check_ip_in_subnet.return_value = True result = self.driver._get_member_request_info(self.ref_member, create=True) self.driver._ovn_helper._get_subnet_from_pool.assert_called_once_with( self.ref_member.pool_id) self.driver._ovn_helper._check_ip_in_subnet.assert_called_once_with( self.ref_member.address, '10.0.0.0/24') expected_result = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': 'subnet_2', 'admin_state_up': self.ref_member.admin_state_up, } self.assertEqual(result, expected_result) def test_get_subnet_fails_raises_exception(self): self.ref_member.subnet_id = None self.driver._ovn_helper._get_subnet_from_pool.return_value = (None, None) self.assertRaises(exceptions.UnsupportedOptionError, self.driver._get_member_request_info, self.ref_member, create=True) self.driver._ovn_helper._get_subnet_from_pool.assert_called_once_with( self.ref_member.pool_id) def test_get_member_request_info_with_unset_type_admin_state_up(self): self.ref_member.admin_state_up = 
data_models.UnsetType() result = self.driver._get_member_request_info(self.ref_member, create=True) expected_result = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id, 'admin_state_up': True, } self.assertEqual(result, expected_result) def test_get_member_request_info_create_false(self): result = self.driver._get_member_request_info(self.ref_member, create=False) expected_result = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': self.ref_member.subnet_id } self.assertEqual(result, expected_result) def test_get_member_request_info_with_unset_type_subnet(self): self.ref_member.subnet_id = data_models.UnsetType() self.driver._ovn_helper._get_subnet_from_pool.return_value = ( 'subnet_2', '10.0.0.0/24') self.driver._ovn_helper._check_ip_in_subnet.return_value = True result = self.driver._get_member_request_info(self.ref_member, create=True) expected_result = { 'id': self.ref_member.member_id, 'address': self.ref_member.address, 'protocol_port': self.ref_member.protocol_port, 'pool_id': self.ref_member.pool_id, 'subnet_id': 'subnet_2', 'admin_state_up': self.ref_member.admin_state_up, } self.assertEqual(result, expected_result) def test_create_vip_port(self): with mock.patch.object(clients, 'get_neutron_client'): port_dict, add_vip_dicts = ( self.driver.create_vip_port(self.loadbalancer_id, self.project_id, self.vip_dict, [])) self.assertIsNotNone(port_dict.pop('vip_address', None)) self.assertIsNotNone(port_dict.pop('vip_port_id', None)) self.assertEqual(len(add_vip_dicts), 0) # The network_driver function is mocked, therefore the # created port vip_address and vip_port_id are also mocked. # Check that they exist and move on. # The final output includes vip_address, vip_port_id, # vip_network_id and vip_subnet_id. for key, value in port_dict.items(): self.assertEqual(value, self.vip_output[key]) def test_create_vip_port_additional_vips(self): with mock.patch.object(clients, 'get_neutron_client'): port_dict, add_vip_dicts = ( self.driver.create_vip_port(self.loadbalancer_id, self.project_id, self.vip_dict, self.additional_vips)) self.assertIsNotNone(port_dict.pop('vip_address', None)) self.assertIsNotNone(port_dict.pop('vip_port_id', None)) self.assertIsNotNone(add_vip_dicts) self.assertEqual(len(add_vip_dicts), 1) # The network_driver function is mocked, therefore the # created port vip_address and vip_port_id are also mocked. # Check that they exist and move on. # The final output includes vip_address, vip_port_id, # vip_network_id and vip_subnet_id.
for key, value in port_dict.items(): self.assertEqual(value, self.vip_output[key]) def test_create_vip_port_exception(self): with mock.patch.object(clients, 'get_neutron_client', side_effect=[RuntimeError]): self.assertRaises( exceptions.DriverError, self.driver.create_vip_port, self.loadbalancer_id, self.project_id, self.vip_dict, []) def test_health_monitor_create(self): info = {'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': self.ref_health_monitor.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': info} self.driver.health_monitor_create(self.ref_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) @mock.patch.object(ovn_driver.OvnProviderDriver, '_is_health_check_supported') def test_health_monitor_create_not_supported(self, ihcs): ihcs.return_value = False self.assertRaises(exceptions.UnsupportedOptionError, self.driver.health_monitor_create, self.ref_health_monitor) def test_health_monitor_create_failure(self): self.assertRaises(exceptions.UnsupportedOptionError, self.driver.health_monitor_create, self.fail_health_monitor) def test_health_monitor_create_failure_unset_type(self): self.fail_health_monitor.type = data_models.UnsetType() self.assertRaises(exceptions.UnsupportedOptionError, self.driver.health_monitor_create, self.fail_health_monitor) def test_health_monitor_create_unset_admin_state_up(self): self.ref_health_monitor.admin_state_up = data_models.UnsetType() info = {'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'type': self.ref_health_monitor.type, 'interval': self.ref_health_monitor.delay, 'timeout': self.ref_health_monitor.timeout, 'failure_count': self.ref_health_monitor.max_retries_down, 'success_count': self.ref_health_monitor.max_retries, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE, 'info': info} self.driver.health_monitor_create(self.ref_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) def test_health_monitor_update(self): info = {'id': self.ref_update_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'interval': self.ref_update_health_monitor.delay, 'timeout': self.ref_update_health_monitor.timeout, 'failure_count': self.ref_update_health_monitor.max_retries_down, 'success_count': self.ref_update_health_monitor.max_retries, 'admin_state_up': self.ref_update_health_monitor.admin_state_up} expected_dict = {'type': ovn_const.REQ_TYPE_HM_UPDATE, 'info': info} self.driver.health_monitor_update(self.ref_health_monitor, self.ref_update_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) def test_health_monitor_update_unset_admin_state_up(self): self.ref_update_health_monitor.admin_state_up = data_models.UnsetType() info = {'id': self.ref_update_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id, 'interval': self.ref_update_health_monitor.delay, 'timeout': self.ref_update_health_monitor.timeout, 'failure_count': self.ref_update_health_monitor.max_retries_down, 'success_count': self.ref_update_health_monitor.max_retries, 'admin_state_up': True} expected_dict = {'type': ovn_const.REQ_TYPE_HM_UPDATE, 'info': info} 
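# An unset admin_state_up on the update object should default to True in the # resulting HM_UPDATE request.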
self.driver.health_monitor_update(self.ref_health_monitor, self.ref_update_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) def test_health_monitor_delete(self): info = {'id': self.ref_health_monitor.healthmonitor_id, 'pool_id': self.ref_health_monitor.pool_id} expected_dict = {'type': ovn_const.REQ_TYPE_HM_DELETE, 'info': info} self.driver.health_monitor_delete(self.ref_health_monitor) self.mock_add_request.assert_called_once_with(expected_dict) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} mock_member_create.return_value = {constants.MEMBERS: [ {'id': self.member_id}]} self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), ) mock_pool_create.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), ) mock_member_create.assert_called_once_with( self.driver._get_member_request_info( self.ref_lb_fully_populated.pools[0].members[0]), ) expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}], constants.POOLS: [{'id': self.pool_id}], constants.MEMBERS: [{'id': self.member_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_without_listeners_or_pools( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] self.ref_lb_fully_populated.listeners = data_models.UnsetType() self.ref_lb_fully_populated.pools = data_models.UnsetType() mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {} mock_pool_create.return_value = {} self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_not_called() mock_pool_create.assert_not_called() expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') 
@mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_member_without_subnet( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} mock_member_create.return_value = {constants.MEMBERS: [ {'id': self.member_id}]} self.ref_lb_fully_populated.listeners = [] self.ref_lb_fully_populated.pools[0].members[0].subnet_id = None self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_member_create.assert_called_once_with( self.driver._get_member_request_info( self.ref_lb_fully_populated.pools[0].members[0]), ) expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [], constants.POOLS: [{'id': self.pool_id}], constants.MEMBERS: [{'id': self.member_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'hm_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_hm_found( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_hm_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} mock_member_create.return_value = {constants.MEMBERS: [ {'id': self.member_id}]} with mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') \ as mock_find_ovn_lb_from_hm_id: mock_find_ovn_lb_from_hm_id.return_value = (mock.ANY, self.ovn_lb) self.ref_pool.healthmonitor = self.ref_health_monitor self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_hm_create.assert_not_called() expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}], constants.POOLS: [{'id': self.pool_id}], constants.MEMBERS: [{'id': self.member_id}], constants.HEALTHMONITORS: [] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'hm_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_hm_lbhc_not_found( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_hm_create, mock_update_status): 
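# _find_ovn_lb_from_hm_id returns nothing (no LBHC found for the monitor id), # so hm_create should run and its result show up under HEALTHMONITORS in the # reported status.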
self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} mock_member_create.return_value = {constants.MEMBERS: [ {'id': self.member_id}]} mock_hm_create.return_value = {constants.HEALTHMONITORS: [ {'id': self.healthmonitor_id}]} with mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') \ as mock_find_ovn_lb_from_hm_id: mock_find_ovn_lb_from_hm_id.return_value = (None, None) self.ref_pool.healthmonitor = self.ref_health_monitor self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_hm_create.assert_called_once_with( self.driver._get_healthmonitor_request_info( self.ref_lb_fully_populated.pools[0].healthmonitor), ) expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}], constants.POOLS: [{'id': self.pool_id}], constants.MEMBERS: [{'id': self.member_id}], constants.HEALTHMONITORS: [{'id': self.healthmonitor_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_without_pools( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] self.ref_lb_fully_populated.pools = data_models.Unset mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), ) mock_pool_create.assert_not_called() expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_without_members( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] self.ref_lb_fully_populated.pools[0].members = [] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) 
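# With pools left unset, only the load balancer and listener should be created; # pool_create must not be called and the reported status carries no POOLS key.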
mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), ) mock_pool_create.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), ) mock_member_create.assert_not_called() expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}], constants.POOLS: [{'id': self.pool_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'hm_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_with_hm( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_hm_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] self.ref_lb_fully_populated.pools[0].members = [] self.ref_pool.healthmonitor = self.ref_health_monitor mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} mock_member_create.return_value = {constants.MEMBERS: [ {'id': self.member_id}]} mock_hm_create.return_value = {constants.HEALTHMONITORS: [ {'id': self.healthmonitor_id}]} self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), ) mock_pool_create.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), ) mock_member_create.assert_not_called() mock_hm_create.assert_called_once_with( self.driver._get_healthmonitor_request_info( self.ref_lb_fully_populated.pools[0].healthmonitor), ) expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}], constants.POOLS: [{'id': self.pool_id}], constants.HEALTHMONITORS: [{'id': self.healthmonitor_id}] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_no_listeners( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] self.ref_lb_fully_populated.listeners = [] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_pool_create.return_value = {constants.POOLS: [ {'id': self.pool_id}]} mock_member_create.return_value = {constants.MEMBERS: [ {'id': self.member_id}]} 
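# An explicitly empty listeners list still creates the LB, pool and member, but # listener_create is skipped and the LISTENERS status stays an empty list.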
self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_not_called() mock_pool_create.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), ) mock_member_create.assert_called_once_with( self.driver._get_member_request_info( self.ref_lb_fully_populated.pools[0].members[0]), ) expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [], constants.POOLS: [{'id': self.pool_id}], constants.MEMBERS: [{'id': self.member_id}], } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_create') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test_ensure_loadbalancer_lb_not_found_no_pools( self, mock_lb_create, mock_listener_create, mock_pool_create, mock_member_create, mock_update_status): self.mock_find_ovn_lbs_with_retry.side_effect = [ idlutils.RowNotFound] self.ref_lb_fully_populated.pools = [] mock_lb_create.return_value = {constants.LOADBALANCERS: [ {'id': self.loadbalancer_id}]} mock_listener_create.return_value = {constants.LISTENERS: [ {'id': self.listener_id}]} self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_create.assert_called_once_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), ) mock_listener_create.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), ) mock_pool_create.assert_not_called() mock_member_create.assert_not_called() expected_status = { constants.LOADBALANCERS: [{'id': self.loadbalancer_id}], constants.LISTENERS: [{'id': self.listener_id}], constants.POOLS: [] } mock_update_status.assert_called_once_with(expected_status) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'hm_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_member_sync, mock_hm_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ref_pool.healthmonitor = self.ref_health_monitor self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info( self.ref_lb_fully_populated), self.ovn_lb ) mock_listener_sync.assert_called_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_listener_sync.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_pool_sync.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), self.ovn_lb ) mock_member_sync.assert_called_once_with( self.driver._get_member_request_info( 
self.ref_lb_fully_populated.pools[0].members[0]), mock.ANY, f"pool_{self.ref_lb_fully_populated.pools[0].pool_id}" ) mock_hm_sync.assert_called_once_with( self.driver._get_healthmonitor_request_info( self.ref_lb_fully_populated.pools[0].healthmonitor), mock.ANY, f"pool_{self.ref_lb_fully_populated.pools[0].pool_id}" ) mock_get_status.assert_called_with(self.ovn_lb) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found_no_listeners_no_pools( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ref_lb_fully_populated.listeners = data_models.Unset self.ref_lb_fully_populated.pools = data_models.Unset self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info(self.ref_lb0), self.ovn_lb ) mock_listener_sync.assert_not_called() mock_pool_sync.assert_not_called() mock_get_status.assert_called_with(self.ovn_lb) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found_no_pools( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ref_lb_fully_populated.pools = data_models.Unset self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_listener_sync.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info(self.ref_lb0), self.ovn_lb ) mock_pool_sync.assert_not_called() mock_get_status.assert_called_with(self.ovn_lb) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'hm_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_ensure_loadbalancer_lb_found_no_hm( self, net_cli, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_member_sync, mock_hm_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ref_pool_with_hm.healthmonitor = None self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info(self.ref_lb0), self.ovn_lb ) mock_listener_sync.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_pool_sync.assert_called_once_with( self.driver._get_pool_request_info( 
self.ref_lb_fully_populated.pools[0]), self.ovn_lb ) mock_hm_sync.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found_no_members( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_member_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id) self.ref_lb_fully_populated.pools[0].members = data_models.Unset self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info(self.ref_lb0), self.ovn_lb ) mock_listener_sync.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_pool_sync.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), self.ovn_lb) mock_member_sync.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found_member_without_subnet( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_member_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ref_lb_fully_populated.listeners = [] self.ref_lb_fully_populated.pools[0].members[0].subnet_id = None self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_member_sync.assert_called_once_with( self.driver._get_member_request_info( self.ref_lb_fully_populated.pools[0].members[0]), mock.ANY, f"pool_{self.ref_lb_fully_populated.pools[0].pool_id}" ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found_no_members_to_delete( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_member_sync, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id) self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info(self.ref_lb0), self.ovn_lb ) mock_listener_sync.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_pool_sync.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), self.ovn_lb) mock_member_sync.assert_called_once_with( 
self.driver._get_member_request_info( self.ref_lb_fully_populated.pools[0].members[0]), mock.ANY, f"pool_{self.ref_lb_fully_populated.pools[0].pool_id}" ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_status_to_octavia') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_current_operating_statuses') @mock.patch.object(ovn_helper.OvnProviderHelper, 'handle_member_dvr') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_delete') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_members_in_ovn_lb') @mock.patch.object(ovn_helper.OvnProviderHelper, 'member_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'pool_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'listener_sync') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_sync') def test_ensure_loadbalancer_lb_found_one_member_to_delete( self, mock_lb_sync, mock_listener_sync, mock_pool_sync, mock_member_sync, mock_get_members_in_ovn_lb, mock_member_delete, mock_handle_member_dvr, mock_get_status, mock_update_status): self.mock_find_ovn_lbs_with_retry.return_value = [ self.ovn_lb] mock_get_members_in_ovn_lb.return_value = [ ['ip', 'port', 'subnet', 'foo'] ] self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id) self.driver._ensure_loadbalancer(self.ref_lb_fully_populated) mock_lb_sync.assert_called_with( self.driver._get_loadbalancer_request_info(self.ref_lb0), self.ovn_lb ) mock_listener_sync.assert_called_once_with( self.driver._get_listener_request_info( self.ref_lb_fully_populated.listeners[0]), self.ovn_lb ) mock_pool_sync.assert_called_once_with( self.driver._get_pool_request_info( self.ref_lb_fully_populated.pools[0]), self.ovn_lb) mock_member_sync.assert_called_once_with( self.driver._get_member_request_info( self.ref_lb_fully_populated.pools[0].members[0]), mock.ANY, f"pool_{self.ref_lb_fully_populated.pools[0].pool_id}" ) mock_member_delete.assert_called_once_with({ 'id': 'foo', 'subnet_id': 'subnet'} ) @mock.patch.object(ovn_helper.OvnProviderHelper, 'get_octavia_lbs') @mock.patch.object(clients, 'get_octavia_client') def test_do_sync_no_loadbalancers(self, mock_get_octavia_client, mock_get_octavia_lbs, ): mock_get_octavia_lbs.return_value = [] lb_filters = {} with mock.patch.object(clients, 'get_neutron_client', side_effect=RuntimeError), \ mock.patch.object(self.driver, '_ensure_loadbalancer') as mock_ensure_lb: self.driver.do_sync(**lb_filters) mock_ensure_lb.assert_not_called() @mock.patch.object(data_models.HealthMonitor, 'from_dict') @mock.patch.object(data_models.Member, 'from_dict') @mock.patch.object(data_models.Listener, 'from_dict') @mock.patch.object(data_models.Pool, 'from_dict') @mock.patch.object(o_driver_lib.DriverLibrary, 'get_loadbalancer') @mock.patch.object(ovn_helper.OvnProviderHelper, 'get_octavia_lbs') @mock.patch.object(clients, 'get_octavia_client') def test_do_sync_with_loadbalancers(self, mock_get_octavia_client, mock_get_octavia_lbs, mock_get_loadbalancer, mock_pool_from_dict, mock_listener_from_dict, mock_member_from_dict, mock_hm_from_dict): lb = mock.MagicMock(id=self.ref_lb_fully_sync_populated.name) mock_get_octavia_lbs.return_value = [lb] mock_get_loadbalancer.return_value = self.ref_lb_fully_sync_populated mock_pool_from_dict.return_value = self.ref_pool_with_hm mock_listener_from_dict.return_value = self.ref_listener mock_member_from_dict.return_value = self.ref_member mock_hm_from_dict.return_value = self.ref_health_monitor lb_filters = {} with mock.patch.object(self.driver, '_ensure_loadbalancer') \ as mock_ensure_lb: self.driver.do_sync(**lb_filters) 
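# do_sync rebuilds each load balancer from the Octavia API data (the mocked # from_dict calls above) and passes it to _ensure_loadbalancer.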
mock_ensure_lb.assert_any_call( self.ref_lb_fully_sync_populated) @mock.patch.object(data_models.HealthMonitor, 'from_dict') @mock.patch.object(data_models.Member, 'from_dict') @mock.patch.object(data_models.Listener, 'from_dict') @mock.patch.object(data_models.Pool, 'from_dict') @mock.patch.object(o_driver_lib.DriverLibrary, 'get_loadbalancer') @mock.patch.object(ovn_helper.OvnProviderHelper, 'get_octavia_lbs') @mock.patch.object(clients, 'get_octavia_client') def test_do_sync_with_loadbalancer_no_members_no_hm( self, mock_get_octavia_client, mock_get_octavia_lbs, mock_get_loadbalancer, mock_pool_from_dict, mock_listener_from_dict, mock_member_from_dict, mock_hm_from_dict): self.ref_pool_with_hm.members = [] self.ref_lb_fully_sync_populated.pools[0].healthmonitor = None lb = mock.MagicMock(id=self.ref_lb_fully_sync_populated.name) mock_get_octavia_lbs.return_value = [lb] mock_get_loadbalancer.return_value = self.ref_lb_fully_sync_populated mock_pool_from_dict.return_value = self.ref_pool_with_hm mock_listener_from_dict.return_value = self.ref_listener lb_filters = {} with mock.patch.object(self.driver, '_ensure_loadbalancer') \ as mock_ensure_lb: self.driver.do_sync(**lb_filters) mock_ensure_lb.assert_any_call( self.ref_lb_fully_sync_populated) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/test_hacking.py0000664000175100017510000000133715033037524027363 0ustar00mylesmyles# Copyright 2020 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class NothingTestCase(base.BaseTestCase): """Nothing test class""" def test_nothing(self): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/test_helper.py0000664000175100017510000116600115033037524027237 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import collections import copy from unittest import mock from neutron_lib import constants as n_const from neutronclient.common import exceptions as n_exc from octavia_lib.api.drivers import data_models from octavia_lib.api.drivers import exceptions from octavia_lib.common import constants import openstack from oslo_serialization import jsonutils from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import idlutils from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import event as ovn_event from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.tests.unit import base as ovn_base from ovn_octavia_provider.tests.unit import fakes Port = collections.namedtuple('Port', 'id, name, network_id, fixed_ips', defaults=[None, '', None, []]) class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase): def setUp(self): super().setUp() ovn_conf.register_opts() self.helper = ovn_helper.OvnProviderHelper() self.real_helper_find_ovn_lb_with_pool_key = ( self.helper._find_ovn_lb_with_pool_key) mock.patch.object(self.helper, '_update_status_to_octavia').start() self.octavia_driver_lib = mock.patch.object( self.helper, '_octavia_driver_lib').start() self.listener = {'id': self.listener_id, 'loadbalancer_id': self.loadbalancer_id, 'protocol': 'TCP', 'protocol_port': 80, 'default_pool_id': self.pool_id, 'admin_state_up': False} self.lb = {'id': self.loadbalancer_id, 'vip_address': self.vip_address, 'cascade': False, 'vip_network_id': self.vip_network_id, 'admin_state_up': False} self.ports = [Port( fixed_ips=[{'ip_address': self.vip_address, 'subnet_id': uuidutils.generate_uuid()}], network_id=self.vip_network_id, id=self.port1_id)] self.pool = {'id': self.pool_id, 'loadbalancer_id': self.loadbalancer_id, 'listener_id': self.listener_id, 'protocol': 'TCP', 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'admin_state_up': False} self.member = {'id': self.member_id, 'address': self.member_address, 'protocol_port': self.member_port, 'subnet_id': self.member_subnet_id, 'pool_id': self.member_pool_id, 'admin_state_up': True, 'old_admin_state_up': True} self.health_monitor = {'id': self.healthmonitor_id, 'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_TCP, 'interval': 6, 'timeout': 7, 'failure_count': 5, 'success_count': 3, 'admin_state_up': True} self.health_mon_udp = {'id': self.healthmonitor_id, 'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'interval': 6, 'timeout': 7, 'failure_count': 5, 'success_count': 3, 'admin_state_up': True} self.ovn_nbdb_api = mock.patch.object(self.helper, 'ovn_nbdb_api') self.ovn_nbdb_api.start() add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper, 'add_request') self.mock_add_request = add_req_thread.start() self.ovn_lb = mock.MagicMock() self.ovn_lb.protocol = ['tcp'] self.ovn_lb.uuid = uuidutils.generate_uuid() self.ovn_lb.health_check = [] self.ovn_lb.selection_fields = ['ip_dst', 'ip_src', 'tp_dst', 'tp_src'] self.ovn_hm_lb = mock.MagicMock() self.ovn_hm_lb.protocol = ['tcp'] self.ovn_hm_lb.uuid = uuidutils.generate_uuid() self.ovn_hm = mock.MagicMock() self.ovn_hm.uuid = self.healthmonitor_id self.ovn_hm.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: self.ovn_hm.uuid, ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id, ovn_const.LB_EXT_IDS_HM_VIP: '10.22.33.99'} self.ovn_hm_lb.health_check = [self.ovn_hm.uuid] self.member_line = ( 'member_%s_%s:%s_%s' % 
(self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', 'enabled': True, 'pool_%s' % self.pool_id: self.member_line, 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id, ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR)} self.ovn_hm_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.99', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.99', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_hm_port', ovn_const.LB_EXT_IDS_HMS_KEY: '["%s"]' % (self.ovn_hm.uuid), 'enabled': True, 'pool_%s' % self.pool_id: '', 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id, ovn_const.OVN_MEMBER_STATUS_KEY: '{}'} self.helper.ovn_nbdb_api.db_find.return_value.\ execute.return_value = [self.ovn_lb] self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.return_value = [self.ovn_lb] self.mock_find_lb_pool_key = mock.patch.object( self.helper, '_find_ovn_lb_with_pool_key', return_value=self.ovn_lb).start() self.mock_find_ovn_lbs = mock.patch.object( ovn_helper.OvnProviderHelper, '_find_ovn_lbs', side_effect=lambda x, protocol=None: self.ovn_lb if protocol else [self.ovn_lb]) self.mock_find_ovn_lbs.start() self._get_pool_listeners = mock.patch.object( self.helper, '_get_pool_listeners', return_value=[]) self._get_pool_listeners.start() self._update_lb_to_ls_association = mock.patch.object( self.helper, '_update_lb_to_ls_association', return_value=[]) self._update_lb_to_ls_association.start() self._get_lb_to_ls_association_commands = mock.patch.object( self.helper, '_get_lb_to_ls_association_commands', return_value=[]) self._get_lb_to_ls_association_commands.start() self._update_lb_to_lr_association = mock.patch.object( self.helper, '_update_lb_to_lr_association', return_value=[]) self._update_lb_to_lr_association.start() self._get_lb_to_lr_association_commands = mock.patch.object( self.helper, '_get_lb_to_lr_association_commands', return_value=[]) self._get_lb_to_lr_association_commands.start() self._update_lb_to_lr_association_by_step = \ mock.patch.object( self.helper, '_update_lb_to_lr_association_by_step', return_value=[]) self._update_lb_to_lr_association_by_step.start() # NOTE(mjozefcz): Create foo router and network. net_id = uuidutils.generate_uuid() net2_id = uuidutils.generate_uuid() router_id = uuidutils.generate_uuid() self.ref_lb1 = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='favorite_lb1', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, ext_ids={ ovn_const.LB_EXT_IDS_LR_REF_KEY: 'neutron-%s' % net_id, ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\": 1}' % net_id, ovn_const.LB_EXT_IDS_VIP_KEY: self.vip_address}) self.ref_lb2 = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='favorite_lb2', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, ext_ids={ ovn_const.LB_EXT_IDS_LR_REF_KEY: 'neutron-%s' % net_id, ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\": 1}' % net_id, ovn_const.LB_EXT_IDS_VIP_KEY: self.vip_address}) # TODO(mjozefcz): Consider using FakeOVNRouter. 
self.router = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'load_balancer': [self.ref_lb1], 'name': 'neutron-%s' % router_id, 'ports': []}) # TODO(mjozefcz): Consider using FakeOVNSwitch. self.network = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'load_balancer': [self.ref_lb2], 'name': 'neutron-%s' % net_id, 'ports': [], 'uuid': net_id}) self.network2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'load_balancer': [], 'name': 'neutron-%s' % net2_id, 'ports': [], 'uuid': net2_id}) self.mock_get_nw = mock.patch.object( self.helper, '_get_nw_router_info_on_interface_event', return_value=(self.router, self.network)) self.mock_get_nw.start() (self.helper.ovn_nbdb_api.ls_get.return_value. execute.return_value) = self.network def test__update_hm_member_no_members(self): pool_key = 'pool_%s' % self.pool_id self.ovn_lb.external_ids[pool_key] = '' self.assertEqual(self.helper._update_hm_member( self.ovn_lb, pool_key, '10.0.0.4'), constants.ERROR) def test__update_hm_member_backend_ip_not_match(self): pool_key = 'pool_%s' % self.pool_id self.ovn_lb.external_ids[pool_key] = self.member_line with mock.patch.object(ovn_helper.OvnProviderHelper, '_get_member_lsp'): self.assertEqual(self.helper._update_hm_member( self. ovn_lb, pool_key, '10.0.0.4'), constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_ensure_hm_ovn_port') def test__update_hm_member_hm_port_multiple_ip(self, ensure_hm_port): hm_port = { 'fixed_ips': [{ 'subnet_id': 'ipv6_foo', 'ip_address': '2001:db8::199'}, { 'subnet_id': self.member_subnet_id, 'ip_address': '10.0.0.4'}]} ensure_hm_port.return_value = hm_port pool_key = 'pool_%s' % self.pool_id with mock.patch.object(ovn_helper.OvnProviderHelper, '_get_member_lsp'): self.assertEqual(self.helper._update_hm_member( self.ovn_lb, pool_key, self.member_address), constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_ensure_hm_ovn_port') def test__update_hm_member_hm_port_not_found(self, ensure_hm_port): ensure_hm_port.return_value = None pool_key = 'pool_%s' % self.pool_id with mock.patch.object(ovn_helper.OvnProviderHelper, '_get_member_lsp'): self.assertIsNone( self.helper._update_hm_member(self.ovn_lb, pool_key, self.member_address)) def test__clean_ip_port_mappings(self): self.helper._clean_ip_port_mappings(self.ovn_hm_lb) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', self.ovn_hm_lb.uuid, 'ip_port_mappings') def test__clean_ip_port_mappings_two_hm_pools_sharing_members(self): self.member_line_pool1 = 'member_uuid1_address1:port1_subnet1, \ member_uuid2_address2:port2_subnet1' self.member_line_pool2 = 'member_uuid3_address1:port3_subnet1, \ member_uuid4_address4:port4_subnet1' self.ovn_hm_lb.external_ids['pool_1'] = self.member_line_pool1 self.ovn_hm_lb.external_ids['pool_2'] = self.member_line_pool2 self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \ '{"uuid1": "ONLINE", "uuid2": "ONLINE", \ "uuid3": "ONLINE", "uuid4": "ONLINE"}' self.helper._clean_ip_port_mappings(self.ovn_hm_lb, 'pool_1') self.helper.ovn_nbdb_api.db_clear.assert_not_called() self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\ assert_called_once_with(self.ovn_hm_lb.uuid, 'address2') def test__clean_ip_port_mappings_one_hm_pools_sharing_members(self): self.member_line_pool1 = 'member_uuid1_address1:port1_subnet1, \ member_uuid2_address2:port2_subnet1' self.member_line_pool2 = 'member_uuid3_address1:port3_subnet1, \ member_uuid4_address2:port4_subnet1' self.ovn_hm_lb.external_ids['pool_1'] = self.member_line_pool1 
self.ovn_hm_lb.external_ids['pool_2'] = self.member_line_pool2 self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \ '{"uuid1": "ONLINE", "uuid2": "ONLINE", \ "uuid3": "NO_MONITOR", "uuid4": "NO_MONITOR"}' self.helper._clean_ip_port_mappings(self.ovn_hm_lb, 'pool_1') self.helper.ovn_nbdb_api.db_clear.assert_not_called() self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\ assert_has_calls([mock.call(self.ovn_hm_lb.uuid, 'address1'), mock.ANY, mock.call(self.ovn_hm_lb.uuid, 'address2'), mock.ANY]) def test__clean_ip_port_mappings_two_hm_pools_not_sharing_members(self): self.member_line_pool1 = 'member_uuid1_address1:port1_subnet1, \ member_uuid2_address2:port2_subnet1' self.member_line_pool2 = 'member_uuid3_address3:port3_subnet1, \ member_uuid4_address4:port4_subnet1' self.ovn_hm_lb.external_ids['pool_1'] = self.member_line_pool1 self.ovn_hm_lb.external_ids['pool_2'] = self.member_line_pool2 self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \ '{"uuid1": "ONLINE", "uuid2": "ONLINE", \ "uuid3": "ONLINE", "uuid4": "ONLINE"}' self.helper._clean_ip_port_mappings(self.ovn_hm_lb, 'pool_1') self.helper.ovn_nbdb_api.db_clear.assert_not_called() self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\ assert_has_calls([mock.call(self.ovn_hm_lb.uuid, 'address1'), mock.ANY, mock.call(self.ovn_hm_lb.uuid, 'address2'), mock.ANY]) def test__update_ip_port_mappings_del_backend_member(self): src_ip = '10.22.33.4' self.helper._update_ip_port_mappings( self.ovn_lb, self.member_address, 'a-logical-port', src_ip, 'test_pool_key', delete=True) self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\ assert_called_once_with(self.ovn_lb.uuid, self.member_address) def test__update_ip_port_mappings_add_backend_member(self): src_ip = '10.22.33.4' self.helper._update_ip_port_mappings( self.ovn_lb, self.member_address, 'a-logical-port', src_ip, 'test_pool_key') self.helper.ovn_nbdb_api.lb_add_ip_port_mapping.\ assert_called_once_with(self.ovn_lb.uuid, self.member_address, 'a-logical-port', src_ip) def test__update_ip_port_mappings_del_backend_member_ipv6(self): member_address = 'fda2:918e:5869:0:f816:3eff:feab:cdef' src_ip = 'fda2:918e:5869:0:f816:3eff:fecd:398a' self.helper._update_ip_port_mappings( self.ovn_lb, member_address, 'a-logical-port', src_ip, 'test_pool_key', delete=True) self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\ assert_called_once_with(self.ovn_lb.uuid, member_address) def test__update_ip_port_mappings_add_backend_member_ipv6(self): member_address = 'fda2:918e:5869:0:f816:3eff:feab:cdef' src_ip = 'fda2:918e:5869:0:f816:3eff:fecd:398a' self.helper._update_ip_port_mappings( self.ovn_lb, member_address, 'a-logical-port', src_ip, 'test_pool_key') self.helper.ovn_nbdb_api.lb_add_ip_port_mapping.\ assert_called_once_with( self.ovn_lb.uuid, member_address, 'a-logical-port', src_ip) def test__update_external_ids_member_status(self): self.helper._update_external_ids_member_status( self.ovn_lb, self.member_id, constants.NO_MONITOR) member_status = { ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR)} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', member_status)) def test__update_external_ids_member_status_delete(self): self.helper._update_external_ids_member_status( self.ovn_lb, self.member_id, None, True) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', ovn_const.OVN_MEMBER_STATUS_KEY) def 
test__update_external_ids_member_status_delete_not_found(self): self.helper._update_external_ids_member_status( self.ovn_lb, 'fool', None, True) member_status = { ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR)} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', member_status)) def test__find_member_status(self): status = self.helper._find_member_status(self.ovn_lb, self.member_id) self.assertEqual(status, constants.NO_MONITOR) status = self.helper._find_member_status( self.ovn_hm_lb, self.member_id) self.assertEqual(status, constants.NO_MONITOR) def test__find_member_status_exception(self): status = self.helper._find_member_status(self.ovn_hm_lb, 'foo') self.assertEqual(status, constants.NO_MONITOR) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') def test__clean_lb_if_empty(self, lb): lb.side_effect = [idlutils.RowNotFound] self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id) commands, lb_to_delete = self.helper._clean_lb_if_empty( self.ovn_lb, self.ovn_lb.uuid, self.ovn_lb.external_ids) self.assertEqual([], commands) self.assertFalse(lb_to_delete) def test__is_lb_empty(self): f = self.helper._is_lb_empty self.assertFalse(f(self.ovn_lb.external_ids)) self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.assertFalse(f(self.ovn_lb.external_ids)) self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id) self.assertTrue(f(self.ovn_lb.external_ids)) def test__delete_disabled_from_status(self): f = self.helper._delete_disabled_from_status status = { 'pools': [ {'id': 'f:D', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'members': [ {'id': 'foo:D', 'provisioning_status': 'ACTIVE'}]} expected = { 'pools': [ {'id': 'f', 'provisioning_status': 'ACTIVE', 'operating_status': 'ONLINE'}], 'members': [ {'id': 'foo', 'provisioning_status': 'ACTIVE'}]} self.assertEqual(f(status), expected) self.assertEqual(f(expected), expected) status = {} self.assertFalse(f(status)) def test__find_ovn_lb_with_pool_key(self): pool_key = self.helper._get_pool_key(uuidutils.generate_uuid()) test_lb = mock.MagicMock() test_lb.external_ids = { ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: ovn_const.PORT_FORWARDING_PLUGIN, pool_key: 'it_is_a_pool_party', } self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.return_value = [test_lb] f = self.real_helper_find_ovn_lb_with_pool_key # Ensure lb is not found, due to its device owner found = f(pool_key) self.assertIsNone(found) # Remove device owner from test_lb.external_ids and make sure test_lb # is found as expected test_lb.external_ids.pop(ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY) found = f(pool_key) self.assertEqual(found, test_lb) # Ensure lb is not found, due to its pool_key not found found = f(self.helper._get_pool_key(uuidutils.generate_uuid())) self.assertIsNone(found) def test__find_ovn_lbs(self): self.mock_find_ovn_lbs.stop() f = self.helper._find_ovn_lbs self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] # Without protocol specified return a list found = f(self.ovn_lb.id) self.assertListEqual(found, [self.ovn_lb]) self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with( 'Load_Balancer', ('name', '=', self.ovn_lb.id)) self.helper.ovn_nbdb_api.db_find_rows.reset_mock() # With protocol specified return an instance found = f(self.ovn_lb.id, protocol='tcp') self.assertEqual(found, self.ovn_lb) 
self.helper.ovn_nbdb_api.db_find_rows.reset_mock() # LB with given protocol not found self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises( idlutils.RowNotFound, f, self.ovn_lb.id, protocol='UDP') # LB with given protocol not found self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises( idlutils.RowNotFound, f, self.ovn_lb.id, protocol='SCTP') # Multiple protocols udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] sctp_lb = copy.copy(self.ovn_lb) sctp_lb.protocol = ['sctp'] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb, udp_lb, sctp_lb] found = f(self.ovn_lb.id) self.assertListEqual(found, [self.ovn_lb, udp_lb, sctp_lb]) # Multiple protocols, just one with correct protocol udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [udp_lb, self.ovn_lb] found = f(self.ovn_lb.id, protocol='tcp') self.assertEqual(found, self.ovn_lb) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__get_subnet_from_pool(self, net_cli): net_cli.return_value.get_subnet.return_value = ( fakes.FakeSubnet.create_one_subnet( attrs={'cidr': '10.22.33.0/24'})) f = self.helper._get_subnet_from_pool lb = data_models.LoadBalancer( loadbalancer_id=self.loadbalancer_id, name='The LB', vip_address=self.vip_address, vip_subnet_id=self.vip_subnet_id, vip_network_id=self.vip_network_id) lb_pool = data_models.Pool( loadbalancer_id=self.loadbalancer_id, name='The pool', pool_id=self.pool_id, protocol='TCP') with mock.patch.object(self.helper, '_octavia_driver_lib') as dlib: dlib.get_pool.return_value = None found = f('not_found') self.assertEqual((None, None), found) dlib.get_pool.return_value = lb_pool dlib.get_loadbalancer.return_value = lb found = f(self.pool_id) self.assertEqual(found, (lb.vip_subnet_id, '10.22.33.0/24')) net_cli.get_subnet.side_effect = [ openstack.exceptions.ResourceNotFound] dlib.get_pool.return_value = None found = f('not_found') self.assertEqual((None, None), found) def test__check_lbhc_vip_format(self): vip = "192.168.0.1:8080" result = self.helper._check_lbhc_vip_format(vip) self.assertTrue(result) vip = "192.168.0.1" result = self.helper._check_lbhc_vip_format(vip) self.assertFalse(result) vip = "[2001:db8:3333:4444:5555:6666:7777:8888]:8080" result = self.helper._check_lbhc_vip_format(vip) self.assertTrue(result) vip = "[2001:db8:3333:4444:5555:6666:7777:8888]" result = self.helper._check_lbhc_vip_format(vip) self.assertFalse(result) vip = "" result = self.helper._check_lbhc_vip_format(vip) self.assertFalse(result) def test__get_subnet_from_pool_lb_no_vip_subnet_id(self): f = self.helper._get_subnet_from_pool lb = data_models.LoadBalancer( loadbalancer_id=self.loadbalancer_id, name='The LB', vip_address=self.vip_address, vip_network_id=self.vip_network_id) lb_pool = data_models.Pool( loadbalancer_id=self.loadbalancer_id, name='The pool', pool_id=self.pool_id, protocol='TCP') with mock.patch.object(self.helper, '_octavia_driver_lib') as dlib: dlib.get_pool.return_value = None found = f('not_found') self.assertEqual((None, None), found) dlib.get_pool.return_value = lb_pool dlib.get_loadbalancer.return_value = lb found = f(self.pool_id) self.assertEqual((None, None), found) def test__get_or_create_ovn_lb_no_lb_found(self): self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises( 
idlutils.RowNotFound, self.helper._get_or_create_ovn_lb, self.ovn_lb.name, protocol='TCP', admin_state_up='True') @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test__get_or_create_ovn_lb_required_proto_not_found(self, lbc): udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] udp_lb.external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = 'foo' self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[udp_lb], [self.ovn_lb]] self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') expected_lb_info = { 'id': self.ovn_lb.name, 'protocol': 'tcp', 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'vip_address': udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY), 'vip_port_id': udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY), ovn_const.LB_EXT_IDS_LR_REF_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY), ovn_const.LB_EXT_IDS_LS_REFS_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY), constants.ADDITIONAL_VIPS: [], 'admin_state_up': 'True', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY), ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY)} lbc.assert_called_once_with(expected_lb_info, protocol='tcp') def test__get_or_create_ovn_lb_found(self): self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] found = self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') self.assertEqual(found, self.ovn_lb) def test__get_or_create_ovn_lb_lb_without_protocol(self): self.mock_find_ovn_lbs.stop() self.ovn_lb.protocol = [] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] found = self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') self.assertEqual(found, self.ovn_lb) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('protocol', 'tcp')) @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create') def test__get_or_create_ovn_lb_no_vip_fip(self, lbc): self.mock_find_ovn_lbs.stop() udp_lb = copy.copy(self.ovn_lb) udp_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) udp_lb.protocol = ['udp'] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[udp_lb], [self.ovn_lb]] self.helper._get_or_create_ovn_lb( self.ovn_lb.name, protocol='TCP', admin_state_up='True') expected_lb_info = { 'id': self.ovn_lb.name, 'protocol': 'tcp', 'lb_algorithm': constants.LB_ALGORITHM_SOURCE_IP_PORT, 'vip_address': udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY), 'vip_port_id': udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY), ovn_const.LB_EXT_IDS_LR_REF_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY), ovn_const.LB_EXT_IDS_LS_REFS_KEY: udp_lb.external_ids.get( ovn_const.LB_EXT_IDS_LS_REFS_KEY), constants.ADDITIONAL_VIPS: [], 'admin_state_up': 'True'} lbc.assert_called_once_with(expected_lb_info, protocol='tcp') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_disabled(self, net_cli): self.lb['admin_state_up'] = False net_cli.return_value.ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], 
constants.OFFLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'False'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_enabled(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_with_additional_vips(self, net_cli): self.lb['admin_state_up'] = True self.lb['additional_vips'] = self.additional_vips net_cli.return_value.ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: self.additional_vips[0]['ip_address'], ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY: self.additional_vips[0]['port_id'], 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_port_from_info') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_create_with_additional_vips_exception_no_ports_leftover( self, del_port, gpfi, net_cli): gpfi.side_effect = [ (Port(name=ovn_const.LB_VIP_PORT_PREFIX + self.loadbalancer_id, id='port_vip_id'), mock.ANY), (Port(name=ovn_const.LB_VIP_ADDIT_PORT_PREFIX + '1-' + self.loadbalancer_id, id='port_addi_vip_id'), None), RuntimeError] self.additional_vips.append({}) self.lb['additional_vips'] = self.additional_vips status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.db_create.assert_not_called() expected_calls = [ mock.call('port_vip_id'), mock.call('port_addi_vip_id')] del_port.assert_has_calls(expected_calls) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_lb_to_ls_association') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_port_from_info') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_create_with_addi_vips_exception_no_ports_leftover_on_create( self, del_port, gpfi, update_lb_ls, net_cli): self.lb['admin_state_up'] = True self.lb['additional_vips'] = self.additional_vips gpfi.side_effect = [ (Port(name=ovn_const.LB_VIP_PORT_PREFIX + self.loadbalancer_id, id='port_vip_id'), mock.ANY), 
(Port(name=ovn_const.LB_VIP_ADDIT_PORT_PREFIX + '1-' + self.loadbalancer_id, id='port_addi_vip_id'), None)] update_lb_ls.side_effect = [RuntimeError] net_cli.return_value.ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: self.additional_vips[0]['ip_address'], ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY: self.additional_vips[0]['port_id'], 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) expected_calls = [ mock.call('port_vip_id'), mock.call('port_addi_vip_id')] del_port.assert_has_calls(expected_calls) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_assoc_lb_to_lr_by_step(self, net_cli, f_lr): self.mock_find_ovn_lbs.stop() self.helper._find_ovn_lbs self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb] self._update_lb_to_ls_association.stop() self.lb['admin_state_up'] = True f_lr.return_value = self.router net_cli.return_value.ports.return_value = self.ports self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) self.helper._update_lb_to_lr_association.assert_called_once_with( self.ovn_lb, self.router ) self.helper._update_lb_to_lr_association_by_step \ .assert_called_once_with( self.ovn_lb, self.router) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_selection_fields_not_supported(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports self.helper._are_selection_fields_supported = ( mock.Mock(return_value=False)) status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[]) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_selection_fields_not_supported_algo(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports net_cli.return_value.get_subnet.return_value = mock.MagicMock() self.pool['lb_algoritm'] = 'foo' status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) # NOTE(mjozefcz): Make sure that we 
use the same selection # fields as for default algorithm - source_ip_port. self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'}, name=mock.ANY, protocol=[], selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def _test_lb_create_on_multi_protocol(self, protocol, provider, net_cli): """Test the situation when a new protocol is added to the same loadbalancer and we need to add an additional OVN lb with the same name. """ self.lb['admin_state_up'] = True self.lb['protocol'] = protocol self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo' self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = '{\"neutron-foo\": 1}' net_cli.return_value.ports.return_value = self.ports fake_network = mock.MagicMock() fake_network.id = self.lb['vip_network_id'] fake_network.provider_physical_network = provider net_cli.return_value.get_network.return_value = fake_network status = self.helper.lb_create(self.lb, protocol=protocol) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.helper.ovn_nbdb_api.db_create.assert_called_once_with( 'Load_Balancer', external_ids={ ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo', 'enabled': 'True'}, name=mock.ANY, protocol=protocol.lower(), selection_fields=['ip_src', 'ip_dst', 'tp_src', 'tp_dst']) if provider: self.helper._update_lb_to_ls_association.assert_not_called() else: self.helper._update_lb_to_ls_association.assert_has_calls([ mock.call(self.ovn_lb, associate=True, network_id=self.lb['vip_network_id'], update_ls_ref=True, additional_vips=True), mock.call(self.ovn_lb, associate=True, network_id='foo', update_ls_ref=True)]) def test_lb_create_on_multi_protocol_UDP(self): self._test_lb_create_on_multi_protocol('UDP', None) def test_lb_create_on_multi_protocol_SCTP(self): self._test_lb_create_on_multi_protocol('SCTP', None) def test_lb_create_on_provider_network(self): # Test case for LB created on provider network. 
# Ensure LB is not associated to the LS in that case self._test_lb_create_on_multi_protocol('TCP', "provider") @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_create_neutron_client_exception(self, net_cli): net_cli.return_value.ports.return_value = self.ports net_cli.return_value.get_subnet.side_effect = [ openstack.exceptions.ResourceNotFound] status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_create_exception(self, del_port, net_cli): self.helper._find_ovn_lbs.side_effect = [RuntimeError] net_cli.return_value.ports.return_value = self.ports status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) del_port.assert_called_once_with(self.ports[0].id) del_port.side_effect = [Exception] status = self.helper.lb_create(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports fake_network = mock.MagicMock() fake_network.id = self.lb['vip_network_id'] fake_network.provider_physical_network = 'TCP' net_cli.return_value.get_network.return_value = fake_network self.helper.lb_sync(self.lb, self.ovn_lb) self.helper._update_lb_to_ls_association.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'})) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_no_changes_required(self, net_cli): net_cli.return_value.ports.return_value = self.ports fake_network = mock.MagicMock() fake_network.id = self.lb['vip_network_id'] net_cli.return_value.get_network.return_value = fake_network self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: self.ports[0].fixed_ips[0]['ip_address'], ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: self.ports[0].id, 'enabled': 'False'} self.helper.lb_sync(self.lb, self.ovn_lb) self.helper._update_lb_to_ls_association.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_client_exception(self, net_cli): self.lb['admin_state_up'] = True net_cli.side_effect = [exceptions.DriverError] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertFalse(self.helper.lb_sync(self.lb, self.ovn_lb)) m_l.warn.assert_called_once_with( ('Cannot get client from neutron An unknown driver error ' 'occurred.') ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_port_from_info') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_exception_missing_info(self, net_cli, m_gpri): self.lb['admin_state_up'] = True m_gpri.side_effect = [AttributeError] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertFalse(self.helper.lb_sync(self.lb, self.ovn_lb)) 
m_l.warn.assert_called_once_with( ("Load Balancer VIP port missing information.") ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_port_from_info') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_exception_vip_port_not_found(self, net_cli, m_gpri): self.lb['admin_state_up'] = True m_gpri.side_effect = [openstack.exceptions.ResourceNotFound] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertFalse(self.helper.lb_sync(self.lb, self.ovn_lb)) m_l.warn.assert_called_once_with( ("Load balancer VIP port and subnet not found.") ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_execution_commands_exception(self, net_cli, m_exec_comm): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports m_exec_comm.side_effect = [RuntimeError('fail')] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertFalse(self.helper.lb_sync(self.lb, self.ovn_lb)) m_l.exception.assert_called_once_with( ("Failed to execute commands for load balancer sync: fail") ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_ovn_lb_not_found_exception(self, net_cli, m_f_ovn): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports m_f_ovn.side_effect = [idlutils.RowNotFound] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertFalse(self.helper.lb_sync(self.lb, self.ovn_lb)) m_l.exception.assert_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_sync_lb_associations') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_sync_lb_assoc_exception(self, net_cli, m_syn_lb_assoc): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports m_syn_lb_assoc.side_effect = [RuntimeError('fail')] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertFalse(self.helper.lb_sync(self.lb, self.ovn_lb)) m_l.exception.assert_called_once_with( ("Failed syncing lb associations on LS and LR for load " "balancer sync: fail") ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_no_selection_fields(self, net_cli, m_f): self.lb['admin_state_up'] = True self.ovn_lb.selection_fields = [] net_cli.return_value.ports.return_value = self.ports m_f.return_value = self.router self.helper.lb_sync(self.lb, self.ovn_lb) self.helper._update_lb_to_ls_association.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_has_calls([ mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'})), mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('selection_fields', ['ip_dst', 'ip_src', 'tp_dst', 'tp_src'])) ]) self.helper._update_lb_to_lr_association.assert_called_once_with( self.ovn_lb, self.router, is_sync=True) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_get_lr(self, net_cli, m_f): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports m_f.return_value = self.router self.helper.lb_sync(self.lb, self.ovn_lb) self.helper._update_lb_to_ls_association.assert_not_called() 
self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'})) self.helper._update_lb_to_lr_association.assert_called_once_with( self.ovn_lb, self.router, is_sync=True) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_get_lr_by_step(self, net_cli, m_f): self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports m_f.return_value = self.router self.helper.lb_sync(self.lb, self.ovn_lb) self.helper._update_lb_to_ls_association.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'})) self.helper._update_lb_to_lr_association_by_step.\ assert_called_once_with(self.ovn_lb, self.router, is_sync=True) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_with_no_provider_net(self, net_cli): self.lb['admin_state_up'] = True net_cli.return_value.ports.return_value = self.ports fake_network = mock.MagicMock() fake_network.id = self.lb['vip_network_id'] fake_network.provider_physical_network = None net_cli.return_value.get_network.return_value = fake_network self.helper.lb_sync(self.lb, self.ovn_lb) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': 'True'})) self.helper._update_lb_to_ls_association.assert_has_calls([ mock.call(self.ovn_lb, associate=True, network_id=self.lb['vip_network_id'], update_ls_ref=True, additional_vips=True, is_sync=True) ]) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_with_additional_vips(self, net_cli): self.lb['admin_state_up'] = True self.lb['additional_vips'] = self.additional_vips self.lb[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = '123.123.123.123' vip_fip = '10.0.0.123' self.lb[ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = vip_fip self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo' net_cli.return_value.ports.return_value = self.ports self.helper.lb_sync(self.lb, self.ovn_lb) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: self.additional_vips[0]['ip_address'], ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY: self.additional_vips[0]['port_id'], ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: vip_fip, ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo', 'enabled': 'True'})) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_with_ls_refs_error_val(self, net_cli): self.lb['admin_state_up'] = True self.lb['additional_vips'] = self.additional_vips self.lb[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = '123.123.123.123' vip_fip = '10.0.0.123' self.lb[ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = vip_fip self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo' self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = '{wrong key}' net_cli.return_value.ports.return_value = self.ports self.helper.lb_sync(self.lb, 
self.ovn_lb) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: self.additional_vips[0]['ip_address'], ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY: self.additional_vips[0]['port_id'], ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: vip_fip, ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo', 'enabled': 'True'})) self.helper._update_lb_to_ls_association.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_sync_with_ls_refs(self, net_cli): self.lb['admin_state_up'] = True self.lb['additional_vips'] = self.additional_vips self.lb[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = '123.123.123.123' vip_fip = '10.0.0.123' self.lb[ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY] = vip_fip self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo' self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = ( '{\"neutron-%s\": 1, \"neutron-fake_id\": 1}' % self.network.uuid) net_cli.return_value.ports.return_value = self.ports self.helper.lb_sync(self.lb, self.ovn_lb) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: self.additional_vips[0]['ip_address'], ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY: self.additional_vips[0]['port_id'], ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: vip_fip, ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo', 'enabled': 'True'})) self.helper._update_lb_to_ls_association.assert_has_calls([ mock.call(mock.ANY, network_id='fake_id', associate=True, update_ls_ref=True, is_sync=True) ]) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete(self, del_port, net_cli): net_cli.return_value.delete_port.return_value = None status = self.helper.lb_delete(self.ovn_lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_with_health_monitor(self, del_port, net_cli): net_cli.return_value.delete_port.return_value = None self.ovn_lb.health_check = [self.ovn_hm] status = self.helper.lb_delete(self.ovn_lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_additional_vips(self, del_port, net_cli): net_cli.return_value.delete_port.return_value = None self.ovn_lb.external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = \ '10.24.34.4,10.25.35.4' self.ovn_lb.external_ids[ ovn_const.LB_EXT_IDS_ADDIT_VIP_PORT_ID_KEY] = \ 'addi_foo_port,addi_foo_port_2' status = self.helper.lb_delete(self.ovn_lb) 
self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) expected_calls = [ mock.call('foo_port'), mock.call('addi_foo_port'), mock.call('addi_foo_port_2')] del_port.assert_has_calls(expected_calls) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_vip_port_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_additional_vips_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_row_not_found( self, del_port, get_addi_vip_port, get_vip_port): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] get_vip_port.return_value = None get_addi_vip_port.return_value = [] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_not_called() del_port.assert_not_called() get_vip_port.assert_called_once_with(self.lb['id']) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_vip_port_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_additional_vips_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_row_not_found_with_additional_vips(self, del_port, get_addi_vip_port, get_vip_port): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] get_vip_port.return_value = None get_addi_vip_port.return_value = [{'port_id': 'addi_foo_port'}, {'port_id': 'addi_foo_port_2'}] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_not_called() expected_calls = [ mock.call('addi_foo_port'), mock.call('addi_foo_port_2')] del_port.assert_has_calls(expected_calls) get_vip_port.assert_called_once_with(self.lb['id']) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_vip_port_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_additional_vips_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_row_not_found_port_leftover( self, del_port, get_addi_vip_port, get_vip_port): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] get_vip_port.return_value = 'foo' get_addi_vip_port.return_value = [] del_port.side_effect = [Exception] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.lb_del.assert_not_called() del_port.assert_called_once_with('foo') get_vip_port.assert_called_once_with(self.lb['id']) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_vip_port_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_additional_vips_from_loadbalancer_id') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_row_not_found_vip_leak( self, del_port, get_addi_vip_port, get_vip_port): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] get_vip_port.return_value = 'foo_port' get_addi_vip_port.return_value = [] status = self.helper.lb_delete(self.lb) 
self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_not_called() del_port.assert_called_once_with('foo_port') get_vip_port.assert_called_once_with(self.lb['id']) @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_exception(self, del_port): self.helper.ovn_nbdb_api.lb_del.side_effect = [RuntimeError] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_step_by_step(self, del_port): self.helper.ovn_nbdb_api.lr_lb_del.side_effect = [idlutils.RowNotFound] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_step_by_step_exception(self, del_port): self.helper.ovn_nbdb_api.lb_del.side_effect = [idlutils.RowNotFound] status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_port_not_found(self, del_port, net_cli): net_cli.return_value.delete_port.side_effect = ( [n_exc.PortNotFoundClient]) status = self.helper.lb_delete(self.ovn_lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_lb_delete_port_exception(self, del_port, net_cli): del_port.side_effect = [Exception] status = self.helper.lb_delete(self.ovn_lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) del_port.assert_called_once_with('foo_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_delete_cascade(self, net_cli): net_cli.return_value.delete_port.return_value = None self.lb['cascade'] = True status = self.helper.lb_delete(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) 
self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_delete_ls_lr(self, net_cli): self.ovn_lb.external_ids.update({ ovn_const.LB_EXT_IDS_LR_REF_KEY: self.router.name, ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\": 1}' % self.network.uuid}) net_cli.return_value.delete_port.return_value = None (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.helper.ovn_nbdb_api.lookup.return_value = self.router self.helper.lb_delete(self.ovn_lb) self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with( self.network.uuid, self.ovn_lb.uuid) self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with( self.router.uuid, self.ovn_lb.uuid) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_lb_delete_multiple_protocols(self, net_cli): net_cli.return_value.delete_port.return_value = None self.mock_find_ovn_lbs.stop() udp_lb = copy.copy(self.ovn_lb) udp_lb.protocol = ['udp'] udp_lb.uuid = 'foo_uuid' self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb, udp_lb] self.helper.lb_delete(self.lb) self.helper.ovn_nbdb_api.lb_del.assert_has_calls([ mock.call(self.ovn_lb.uuid), mock.call(udp_lb.uuid)]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_lb_update_disabled(self, refresh_vips): self.lb['admin_state_up'] = False status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.OFFLINE) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'False'})) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_lb_update_enabled(self, refresh_vips): # Change the mock, its enabled by default. 
self.ovn_lb.external_ids.update({'enabled': False}) self.lb['admin_state_up'] = True status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'True'})) # update to re-enable self.ovn_lb.external_ids.update({'enabled': True}) self.lb['admin_state_up'] = True status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'True'})) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_lb_update_enabled_multiple_protocols(self, refresh_vips): self.mock_find_ovn_lbs.stop() self.ovn_lb.external_ids.update({'enabled': 'False'}) udp_lb = copy.deepcopy(self.ovn_lb) udp_lb.protocol = ['udp'] udp_lb.uuid = 'foo_uuid' self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_lb, udp_lb] self.lb['admin_state_up'] = True status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) refresh_vips.assert_has_calls([ mock.call(self.ovn_lb, self.ovn_lb.external_ids), mock.ANY, mock.ANY, mock.call(udp_lb, udp_lb.external_ids)], any_order=False) self.helper.ovn_nbdb_api.db_set.assert_has_calls([ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'enabled': 'True'})), mock.call('Load_Balancer', udp_lb.uuid, ('external_ids', {'enabled': 'True'}))]) def test_lb_update_exception(self): self.helper._find_ovn_lbs.side_effect = [RuntimeError] status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) def test_lb_update_no_admin_state_up(self): self.lb.pop('admin_state_up') status = self.helper.lb_update(self.lb) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.helper._find_ovn_lbs.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_create_disabled(self, refresh_vips): self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) status = self.helper.listener_create(self.listener) # Set expected as disabled self.ovn_lb.external_ids.update({ 'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id}) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) expected_calls = [ mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('protocol', 'tcp'))] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_calls) self.assertEqual( len(expected_calls), self.helper.ovn_nbdb_api.db_set.call_count) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], 
constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_create_enabled(self, refresh_vips): self.listener['admin_state_up'] = True status = self.helper.listener_create(self.listener) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) expected_calls = [ mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_calls) self.assertEqual( len(expected_calls), self.helper.ovn_nbdb_api.db_set.call_count) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) def test_listener_create_no_default_pool(self): self.listener['admin_state_up'] = True self.listener.pop('default_pool_id') self.helper.listener_create(self.listener) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s' % self.listener_id: '80:'})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) self.assertEqual( len(expected_calls), self.helper.ovn_nbdb_api.db_set.call_count) def test_listener_create_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError] status = self.helper.listener_create(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['operating_status'], constants.ERROR) def test_listener_update(self): status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.listener['admin_state_up'] = True status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) def test_listener_update_row_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_exception(self, refresh_vips): refresh_vips.side_effect = [RuntimeError] status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_listener_enabled(self, 
refresh_vips): self.listener['admin_state_up'] = True # Update the listener port. self.listener.update({'protocol_port': 123}) status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'listener_%s' % self.listener_id: '123:pool_%s' % self.pool_id})) # Update expected listener, because it was updated. self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.ovn_lb.external_ids.update( {'listener_%s' % self.listener_id: '123:pool_%s' % self.pool_id}) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_listener_disabled(self, refresh_vips): self.listener['admin_state_up'] = False status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) # It gets disabled, so update the key self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.ovn_lb.external_ids.update( {'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id}) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) # As it is marked disabled, a second call should not try and remove it self.helper.ovn_nbdb_api.db_remove.reset_mock() status = self.helper.listener_update(self.listener) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_no_admin_state_up(self, refresh_vips): self.listener.pop('admin_state_up') status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_update_no_admin_state_up_or_default_pool_id( self, refresh_vips): self.listener.pop('admin_state_up') self.listener.pop('default_pool_id') status = self.helper.listener_update(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() refresh_vips.assert_not_called() def test_listener_delete_no_external_id(self): self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) status = self.helper.listener_delete(self.listener) 
self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_remove.assert_not_called() def test_listener_delete_row_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) def test_listener_delete_exception(self): self.helper.ovn_nbdb_api.db_remove.side_effect = [RuntimeError] status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['operating_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_delete_external_id(self, refresh_vips): status = self.helper.listener_delete(self.listener) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['listeners'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_not_empty(self, lb_empty): lb_empty.return_value = False self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_empty_octavia_lb_empty(self, lb_empty): """Test the situation when the OVN and Octavia LBs are empty. Test the situation when both the OVN and Octavia LBs are empty, but we cannot remove the OVN LB row. """ lb_empty.return_value = True self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() # Assert that protocol has been set to [].
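# Added note: the OVN Load_Balancer row itself is kept here (lb_del is not
# called above); only its protocol column is cleared, presumably so the
# now-empty row can later host a listener of any protocol. That rationale is
# an assumption added for clarity, not something this test asserts.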
self.helper.ovn_nbdb_api.db_set.assert_has_calls([ mock.call('Load_Balancer', self.ovn_lb.uuid, ('protocol', []))]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_empty_octavia_lb_not_empty(self, lb_empty): """Test that we can remove the one OVN LB whose protocol is no longer used.""" ovn_lb_udp = copy.copy(self.ovn_lb) ovn_lb_udp.protocol = ['udp'] self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[self.ovn_lb], [self.ovn_lb, ovn_lb_udp]] lb_empty.return_value = True self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) # Validate that the vips column hasn't been touched, because # the previous command removed the LB, so there is no need # to update it. self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_listener_delete_ovn_lb_empty_ovn_lb_not_found(self, lb_empty): """Test the situation when the OVN and Octavia LBs are empty. Test the situation when both the OVN and Octavia LBs are empty, but we cannot find the OVN LB row when cleaning. """ self.helper._find_ovn_lbs.side_effect = [ self.ovn_lb, idlutils.RowNotFound] lb_empty.return_value = True self.helper.listener_delete(self.listener) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'listener_%s' % self.listener_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() # vip refresh will have been called self.helper.ovn_nbdb_api.db_clear.assert_has_calls([ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips'))]) self.helper.ovn_nbdb_api.db_set.assert_has_calls([ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_sync_listener_same_in_externals_ids(self, refresh_vips): self.listener['admin_state_up'] = True listener_key = 'listener_%s' % self.listener_id self.ovn_lb.external_ids[listener_key] = f"80:pool_{self.pool_id}" self.helper.listener_sync(self.listener, self.ovn_lb) refresh_vips.assert_called_once_with( self.ovn_lb, self.ovn_lb.external_ids, is_sync=True) self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_sync_listener_diff_in_externals_ids(self, refresh_vips): self.listener['admin_state_up'] = True listener_key = 'listener_%s' % self.listener_id external_ids = copy.deepcopy(self.ovn_lb.external_ids) self.ovn_lb.external_ids[listener_key] = '' self.helper.listener_sync(self.listener, self.ovn_lb) refresh_vips.assert_called_once_with( self.ovn_lb, external_ids, is_sync=True) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { f"listener_{self.listener_id}": f"80:pool_{self.pool_id}"})) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test_listener_sync_exception(self, execute_commands): execute_commands.side_effect = [RuntimeError('a fail')] self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.listener['admin_state_up'] = True with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertIsNone(self.helper.listener_sync( self.listener, self.ovn_lb)) m_l.exception.assert_called_once_with(
'Failed to execute commands for listener sync: a fail') @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_sync_refresh_vips_exception(self, refresh_lb_vips): refresh_lb_vips.side_effect = [RuntimeError('a fail')] self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.listener['admin_state_up'] = True with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertIsNone(self.helper.listener_sync( self.listener, self.ovn_lb)) m_l.exception.assert_called_once_with( 'Failed to refresh LB VIPs: a fail') @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') def test_listener_sync_listener_not_in_externals_ids( self, refresh_vips): self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) self.listener['admin_state_up'] = True self.helper.listener_sync(self.listener, self.ovn_lb) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { f"listener_{self.listener_id}": f"80:pool_{self.pool_id}"})) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_create(self): status = self.helper.pool_create(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.pool['admin_state_up'] = True # Pool Operating status shouldnt change if member isnt present. status = self.helper.pool_create(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) # Pool without listener set should be OFFLINE self.pool['listener_id'] = None status = self.helper.pool_create(self.pool) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) def test_pool_create_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [ RuntimeError, RuntimeError] status = self.helper.pool_create(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.pool['listener_id'] = None status = self.helper.pool_create(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) def test_pool_update(self): status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) self.pool['admin_state_up'] = True status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) def test_pool_update_exception_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.pool_update(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def 
test_pool_update_exception(self): self.helper._get_pool_listeners.side_effect = [RuntimeError] status = self.helper.pool_update(self.pool) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def test_pool_update_unset_admin_state_up(self): self.pool.pop('admin_state_up') status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) def test_pool_update_pool_disabled_change_to_up(self): self.pool.update({'admin_state_up': True}) disabled_p_key = self.helper._get_pool_key(self.pool_id, is_enabled=False) p_key = self.helper._get_pool_key(self.pool_id) self.ovn_lb.external_ids.update({ disabled_p_key: self.member_line}) self.ovn_lb.external_ids.pop(p_key) status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {'10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_update_pool_disabled_change_to_down(self): self.pool.update({'admin_state_up': False}) disabled_p_key = self.helper._get_pool_key(self.pool_id, is_enabled=False) p_key = self.helper._get_pool_key(self.pool_id) self.ovn_lb.external_ids.update({ disabled_p_key: self.member_line}) self.ovn_lb.external_ids.pop(p_key) status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_set.assert_not_called() def test_pool_update_pool_up_change_to_disabled(self): self.pool.update({'admin_state_up': False}) status = self.helper.pool_update(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s:D' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_update_listeners(self): self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.pool_update(self.pool) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) def test_pool_update_listeners_none(self): status = self.helper.pool_update(self.pool) self.assertFalse(status['listeners']) def test_pool_delete(self): status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'vips') self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 
'pool_%s' % self.pool_id) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {})), mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', 'enabled': True, 'listener_%s' % self.listener_id: '80:', ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR)}))] self.assertEqual(self.helper.ovn_nbdb_api.db_set.call_count, len(expected_calls)) self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_delete_row_not_found(self): self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound] status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.db_remove.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_not_called() def test_pool_delete_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError] status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ERROR) def test_pool_delete_associated_listeners(self): self.helper._get_pool_listeners.return_value = ['listener1'] status = self.helper.pool_delete(self.pool) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.helper.ovn_nbdb_api.db_set.assert_called_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'enabled': True, 'listener_%s' % self.listener_id: '80:', ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR)})) def test_pool_delete_pool_disabled(self): disabled_p_key = self.helper._get_pool_key(self.pool_id, is_enabled=False) p_key = self.helper._get_pool_key(self.pool_id) self.ovn_lb.external_ids.update({ disabled_p_key: self.member_line}) self.ovn_lb.external_ids.pop(p_key) status = self.helper.pool_delete(self.pool) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.DELETED) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s:D' % self.pool_id) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_pool_delete_ovn_lb_not_empty(self, lb_empty): lb_empty.return_value = False self.helper.pool_delete(self.pool) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_pool_delete_ovn_lb_empty_lb_empty(self, lb_empty): lb_empty.return_value = True self.helper.pool_delete(self.pool) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) self.helper.ovn_nbdb_api.lb_del.assert_not_called() # Assert that protocol has been set to []. 
self.helper.ovn_nbdb_api.db_set.assert_called_with( 'Load_Balancer', self.ovn_lb.uuid, ('protocol', [])) @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty') def test_pool_delete_ovn_lb_empty_lb_not_empty(self, lb_empty): ovn_lb_udp = copy.copy(self.ovn_lb) self.mock_find_ovn_lbs.stop() self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.side_effect = [[self.ovn_lb], [self.ovn_lb, ovn_lb_udp]] lb_empty.return_value = True self.helper.pool_delete(self.pool) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', 'pool_%s' % self.pool_id) self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( self.ovn_lb.uuid) def test_pool_sync_exception(self): self.helper.ovn_nbdb_api.db_set.side_effect = [ RuntimeError("ERROR_MSG"), RuntimeError("ERROR_MSG")] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertIsNone(self.helper.pool_sync(self.pool, self.ovn_lb)) m_l.exception.assert_called_once_with( 'Failed to execute commands for pool sync: ERROR_MSG') def test_pool_sync(self): self.helper.pool_sync(self.pool, self.ovn_lb) listener_key_value = (f"80:pool_{self.pool_id}:" f"{ovn_const.DISABLED_RESOURCE_SUFFIX}") expected_calls = [mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': True, f"pool_{self.pool_id}": mock.ANY, f"listener_{self.listener_id}": listener_key_value, ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR), f"pool_{self.pool_id}:{ovn_const.DISABLED_RESOURCE_SUFFIX}": '' })) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_pool_sync_press_key(self): self.pool[constants.SESSION_PERSISTENCE] = { constants.PERSISTENCE_TIMEOUT: '360' } self.ovn_lb.options = {'a': 1} self.helper.pool_sync(self.pool, self.ovn_lb) listener_key_value = (f"80:pool_{self.pool_id}:" f"{ovn_const.DISABLED_RESOURCE_SUFFIX}") expected_calls = [mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY, ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, 'enabled': True, f"pool_{self.pool_id}": mock.ANY, f"listener_{self.listener_id}": listener_key_value, ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR), f"pool_{self.pool_id}:{ovn_const.DISABLED_RESOURCE_SUFFIX}": '' })), mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('options', {'a': 1, 'affinity_timeout': '360'})) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_disabled(self, net_cli): net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound] self._update_external_ids_member_status(self.ovn_lb, self.member['id'], 'offline') self.member['admin_state_up'] = False status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.OFFLINE) self.helper.ovn_nbdb_api.db_clear.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') @mock.patch.object(ovn_helper.OvnProviderHelper, 
'_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_lb_add_from_lr(self, net_cli, f_lr, folbpi): fake_subnet = fakes.FakeSubnet.create_one_subnet() net_cli.return_value.get_subnet.return_value = fake_subnet f_lr.return_value = self.router pool_key = 'pool_%s' % self.pool_id folbpi.return_value = (pool_key, self.ovn_lb) self.ovn_lb.external_ids = mock.MagicMock() status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) f_lr.assert_called_once_with(self.network, fake_subnet['gateway_ip']) self.helper._update_lb_to_lr_association.assert_called_once_with( self.ovn_lb, self.router) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_lb_add_from_lr_no_ls(self, net_cli, f_lr, f_ls): fake_subnet = fakes.FakeSubnet.create_one_subnet() net_cli.return_value.get_subnet.return_value = fake_subnet self.ovn_lb.external_ids = mock.MagicMock() (self.helper.ovn_nbdb_api.ls_get.return_value. execute.side_effect) = [openstack.exceptions.ResourceNotFound] status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) (self.helper.ovn_nbdb_api.ls_get.return_value.execute. assert_called_once_with(check_error=True)) f_lr.assert_not_called() f_ls.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_add_member') def test_member_create_exception(self, mock_add_member): mock_add_member.side_effect = [RuntimeError] self._update_external_ids_member_status(self.ovn_lb, self.member_id, 'error') status = self.helper.member_create(self.member) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) def test_member_create_lb_disabled(self): self.helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.helper.member_create(self.member) self.helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create_lb_add_from_lr_retry(self, net_cli, f_lr, folbpi): fake_subnet = fakes.FakeSubnet.create_one_subnet() net_cli.return_value.get_subnet.return_value = fake_subnet f_lr.return_value = self.router pool_key = 'pool_%s' % self.pool_id folbpi.return_value = (pool_key, self.ovn_lb) self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] self.ovn_lb.external_ids = mock.MagicMock() status = self.helper.member_create(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) f_lr.assert_called_once_with(self.network, fake_subnet.gateway_ip) self.helper._update_lb_to_lr_association.assert_called_once_with( self.ovn_lb, self.router) self.helper._update_lb_to_lr_association_by_step \ .assert_called_once_with( self.ovn_lb, self.router) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_create(self, net_cli): net_cli.return_value.get_subnet.side_effect = [idlutils.RowNotFound] status = self.helper.member_create(self.member) 
self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['members'][0]['id'], self.member_id) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.NO_MONITOR) def test_member_create_already_exists(self): status = self.helper.member_create(self.member) member_status = { ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}' % (self.member_id, constants.NO_MONITOR)} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('external_ids', member_status)) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.NO_MONITOR) def test_member_create_first_member_in_pool(self): self.ovn_lb.external_ids.update({ 'pool_' + self.pool_id: ''}) self.helper.member_create(self.member) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_member_create_second_member_in_pool(self): member2_id = uuidutils.generate_uuid() member2_subnet_id = uuidutils.generate_uuid() member2_port = '1010' member2_address = '192.168.2.150' member2_line = ('member_%s_%s:%s_%s' % (member2_id, member2_address, member2_port, member2_subnet_id)) self.ovn_lb.external_ids.update( {'pool_%s' % self.pool_id: member2_line}) self.helper.member_create(self.member) all_member_line = ( '%s,member_%s_%s:%s_%s' % (member2_line, self.member_id, self.member_address, self.member_port, self.member_subnet_id)) # We have two members now. 
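# Added note: as the expected calls below show, each pool is tracked in
# external_ids under 'pool_<pool_id>' as a comma-separated list of
# 'member_<member_id>_<address>:<port>_<subnet_id>' entries, while the OVN
# 'vips' column maps '<vip>:<listener_port>' (and the FIP equivalent) to the
# comma-joined '<member_address>:<member_port>' backends.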
expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { 'pool_%s' % self.pool_id: all_member_line})), mock.call( 'Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.150:1010,192.168.2.149:1010', '123.123.123.123:80': '192.168.2.150:1010,192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) def test_member_update(self): status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.NO_MONITOR) self.member['admin_state_up'] = False self._update_external_ids_member_status(self.ovn_lb, self.member_id, 'offline') status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.OFFLINE) self.member.pop('admin_state_up') status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.member['old_admin_state_up'] = False self.member['admin_state_up'] = True self._update_external_ids_member_status(self.ovn_lb, self.member_id, 'online') status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.NO_MONITOR) fake_member = fakes.FakeMember( uuid=self.member_id, admin_state_up=True, address=self.member_address, protocol_port=self.member_port) self.octavia_driver_lib.get_member.return_value = fake_member self.member['old_admin_state_up'] = None status = self.helper.member_update(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.NO_MONITOR) def test_member_update_disabled_lb(self): self.helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.helper.member_update(self.member) self.helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_member_status') def test_member_update_exception(self, mock_find_member_status): mock_find_member_status.side_effect = [TypeError] status = self.helper.member_update(self.member) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_refresh_lb_vips') def test_member_delete(self, mock_vip_command): status = self.helper.member_delete(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) @mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_external_ids_member_status') def test_member_delete_one_left(self, update_external_ids_members, rmmember): member2_id = uuidutils.generate_uuid() member2_port = '1010' member2_address = '192.168.2.150' member2_subnet_id = uuidutils.generate_uuid() member_line = ( 'member_%s_%s:%s_%s,member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id, member2_id, member2_address, member2_port, member2_subnet_id)) self.ovn_lb.external_ids.update({ 'pool_' + self.pool_id: member_line}) status = self.helper.member_delete(self.member) rmmember.assert_called_once_with( self.member, self.ovn_lb, 'pool_' + self.pool_id) update_external_ids_members.assert_called_once_with( self.ovn_lb, self.member_id, None, delete=True) self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') def test_member_delete_hm(self, uhm, folbpi): pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = self.member_line self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \ '{"%s": "%s"}' % (self.member_id, constants.ONLINE) folbpi.return_value = (pool_key, self.ovn_hm_lb) self.helper.member_delete(self.member) uhm.assert_called_once_with(self.ovn_hm_lb, pool_key, self.member_address, delete=True) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port') def test_member_delete_keep_hm_port(self, del_hm_port, uhm, folbpi): pool_key = 'pool_%s' % self.pool_id member2_id = uuidutils.generate_uuid() member2_address = '192.168.2.150' member2_line = ( 'member_%s_%s:%s_%s' % (member2_id, member2_address, self.member_port, self.member_subnet_id)) self.ovn_hm_lb.external_ids[pool_key] = ','.join([self.member_line, member2_line]) self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \ '{"%s": "%s","%s": "%s"}' % (self.member_id, constants.ONLINE, member2_id, constants.ONLINE) folbpi.return_value = (pool_key, self.ovn_hm_lb) self.helper.member_delete(self.member) uhm.assert_called_once_with(self.ovn_hm_lb, pool_key, self.member_address, delete=True) del_hm_port.assert_not_called() def test_member_delete_not_found_in_pool(self): self.ovn_lb.external_ids.update({'pool_' + self.pool_id: ''}) self.ovn_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = '{}' status = self.helper.member_delete(self.member) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.DELETED) @mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member') def 
test_member_delete_exception(self, mock_remove_member): mock_remove_member.side_effect = [RuntimeError] status = self.helper.member_delete(self.member) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ERROR) def test_member_delete_disabled_lb(self): self.helper._find_ovn_lb_with_pool_key.side_effect = [ None, self.ovn_lb] self.helper.member_delete(self.member) self.helper._find_ovn_lb_with_pool_key.assert_has_calls( [mock.call('pool_%s' % self.pool_id), mock.call('pool_%s%s' % (self.pool_id, ':D'))]) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_member_sync_exist_member(self, folbpi, net_cli, m_flrols): m_flrols.return_value = self.router pool_key = 'pool_%s' % self.pool_id self.ovn_lb.external_ids.update( {pool_key: self.member_line}) folbpi.return_value = (pool_key, self.ovn_lb) self.helper.member_sync(self.member, self.ovn_lb, pool_key) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) self.assertEqual( self.helper._update_lb_to_lr_association.call_count, 1) self.assertEqual( self.helper._update_lb_to_lr_association_by_step.call_count, 0) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_member_sync_exist_member_lr_error( self, folbpi, net_cli, m_flrols ): self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] m_flrols.return_value = self.router pool_key = 'pool_%s' % self.pool_id self.ovn_lb.external_ids.update( {pool_key: self.member_line}) folbpi.return_value = (pool_key, self.ovn_lb) self.helper.member_sync(self.member, self.ovn_lb, pool_key) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) self.assertEqual( self.helper._update_lb_to_lr_association.call_count, 1) self.assertEqual( self.helper._update_lb_to_lr_association_by_step.call_count, 1) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_member_sync_exist_member_lr_not_found( self, folbpi, net_cli, m_flrols): self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] m_flrols.side_effect = [idlutils.RowNotFound] pool_key = 'pool_%s' % self.pool_id self.ovn_lb.external_ids.update( {pool_key: self.member_line}) folbpi.return_value = (pool_key, self.ovn_lb) self.helper.member_sync(self.member, self.ovn_lb, pool_key) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) self.assertEqual( self.helper._update_lb_to_lr_association.call_count, 0) self.assertEqual( self.helper._update_lb_to_lr_association_by_step.call_count, 0) 
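# Illustrative sketch (added; not part of the provider code, and the helper
# name below is hypothetical): the member tests above assert that pool
# members stored as 'member_<uuid>_<ip>:<port>_<subnet>' entries are rendered
# into the OVN Load_Balancer 'vips' column as
# '<vip>:<listener_port>' -> '<member_ip>:<member_port>[,...]'. A minimal
# reimplementation of that mapping, assuming IPv4 members and hyphenated
# UUIDs, could look like this:
def _example_build_vips(vip_ip, fip_ip, listener_port, member_lines):
    """Build a vips dict like the ones asserted in the tests above."""
    backends = ','.join(
        line.split('_')[2]  # keep the '<ip>:<port>' part of each entry
        for line in member_lines.split(',') if line)
    vips = {'%s:%s' % (vip_ip, listener_port): backends}
    if fip_ip:
        # The floating IP gets the same backend list as the VIP.
        vips['%s:%s' % (fip_ip, listener_port)] = backends
    return vips
# For example, with a single entry 'member_<uuid>_192.168.2.149:1010_<subnet>'
# this returns {'10.22.33.4:80': '192.168.2.149:1010',
#               '123.123.123.123:80': '192.168.2.149:1010'}.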
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_member_sync_new_member(self, net_cli): pool_key = 'pool_' + self.pool_id self.ovn_lb.external_ids.update({ pool_key: ''}) self.helper.member_sync(self.member, self.ovn_lb, pool_key) expected_calls = [ mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', {'pool_%s' % self.pool_id: self.member_line})), mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { '10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_calls) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_router_port_event_create(self, net_cli): self.router_port_event = ovn_event.LogicalRouterPortEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_ROUTER_IS_EXT_GW: 'False'}}) self.router_port_event.run('create', row, mock.ANY) expected = { 'info': {'router': self.router, 'network': self.network, 'is_gw_port': False}, 'type': 'lb_create_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_router_port_event_delete(self, net_cli): self.router_port_event = ovn_event.LogicalRouterPortEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={}) self.router_port_event.run('delete', row, mock.ANY) expected = { 'info': {'router': self.router, 'network': self.network}, 'type': 'lb_delete_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_router_port_event_gw_port(self, net_cli): self.router_port_event = ovn_event.LogicalRouterPortEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_ROUTER_IS_EXT_GW: 'True'}}) self.router_port_event.run(mock.ANY, row, mock.ANY) expected = { 'info': {'router': self.router, 'network': self.network, 'is_gw_port': True}, 'type': 'lb_create_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) def test__get_pool_listeners(self): self._get_pool_listeners.stop() self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: 'fc00::', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '2002::', 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} ret = self.helper._get_pool_listeners( self.ovn_lb, 'pool_%s' % self.pool_id) self.assertEqual([self.listener_id], ret) def test__get_pool_listeners_not_found(self): self._get_pool_listeners.stop() self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: 'fc00::', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '2002::', 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} ret = self.helper._get_pool_listeners( self.ovn_lb, 'pool_foo') self.assertEqual([], ret) def test___get_pool_listener_port(self): self._get_pool_listeners.stop() self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: 'fc00::', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '2002::', 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} ret = self.helper._get_pool_listener_port( self.ovn_lb, 'pool_foo') self.assertIsNone(ret) def test__get_nw_router_info_on_interface_event(self): self.mock_get_nw.stop() lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'network1'} }) self.helper._get_nw_router_info_on_interface_event(lrp) expected_calls = [ 
mock.call.lookup('Logical_Router', 'neutron-router1'), mock.call.lookup('Logical_Switch', 'network1')] self.helper.ovn_nbdb_api.assert_has_calls(expected_calls) def test__get_nw_router_info_on_interface_event_map_key_error(self): self.mock_get_nw.stop() lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'network1'} }) lrp._table = mock.MagicMock(name='foo') self.assertRaises( idlutils.RowNotFound, self.helper._get_nw_router_info_on_interface_event, lrp) def test__get_nw_router_info_on_interface_event_not_found(self): self.mock_get_nw.stop() self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'} }) self.assertRaises( idlutils.RowNotFound, self.helper._get_nw_router_info_on_interface_event, lrp) def test_lb_delete_lrp_assoc_handler(self): lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.helper.lb_delete_lrp_assoc_handler(lrp) expected = { 'info': {'router': self.router, 'network': self.network}, 'type': 'lb_delete_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) def test_lb_delete_lrp_assoc_handler_info_not_found(self): self.mock_get_nw.stop() self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'} }) self.helper.lb_delete_lrp_assoc_handler(lrp) self.mock_add_request.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test_lb_delete_lrp_assoc_no_net_lb_no_r_lb(self, mock_execute): info = { 'network': self.network, 'router': self.router, } self.network.load_balancer = [] self.router.load_balancer = [] self.helper.lb_delete_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_not_called() mock_execute.assert_not_called() def test_lb_delete_lrp_assoc_no_net_lb_r_lb(self): info = { 'network': self.network, 'router': self.router, } self.network.load_balancer = [] self.helper.lb_delete_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_not_called() self.helper._update_lb_to_ls_association.assert_called_once_with( self.router.load_balancer[0], network_id=info['network'].uuid, associate=False, update_ls_ref=False ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test_lb_delete_lrp_assoc_net_lb_no_r_lb(self, mock_execute): info = { 'network': self.network, 'router': self.router, } self.router.load_balancer = [] self.helper.lb_delete_lrp_assoc(info) mock_execute.assert_not_called() self.helper._update_lb_to_lr_association.assert_called_once_with( self.network.load_balancer[0], self.router, delete=True ) def test_lb_delete_lrp_assoc_r_lb_exception(self): info = { 'network': self.network, 'router': self.router, } self.helper._update_lb_to_ls_association.side_effect = [ idlutils.RowNotFound] with self.assertLogs(level='WARN') as cm: self.helper.lb_delete_lrp_assoc(info) self.assertEqual( cm.output, ['WARNING:ovn_octavia_provider.helper:' 'The disassociation of loadbalancer ' '%s to the logical switch %s failed, just keep going on' % (self.router.load_balancer[0].uuid, self.network.uuid)]) def test_lb_delete_lrp_assoc(self): info = { 'network': self.network, 'router': self.router, } self.helper.lb_delete_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_called_once_with( self.network.load_balancer[0], self.router, delete=True ) 
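# Added note: deleting the LRP association is two-sided, as the assertions
# around this note check: the network's LBs are detached from the router,
# and the router's LBs are detached from the logical switch.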
self.helper._update_lb_to_ls_association.assert_called_once_with( self.router.load_balancer[0], network_id=self.network.uuid, associate=False, update_ls_ref=False ) def test_lb_delete_lrp_assoc_ls_by_step(self): self._update_lb_to_ls_association.stop() info = { 'network': self.network, 'router': self.router, } self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] self.helper.lb_delete_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_called_once_with( self.network.load_balancer[0], self.router, delete=True ) self.helper._update_lb_to_lr_association_by_step \ .assert_called_once_with( self.network.load_balancer[0], self.router, delete=True) def test_lb_create_lrp_assoc_handler(self): lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_ROUTER_IS_EXT_GW: 'False'}}) self.helper.lb_create_lrp_assoc_handler(lrp) expected = { 'info': {'router': self.router, 'network': self.network, 'is_gw_port': False}, 'type': 'lb_create_lrp_assoc'} self.mock_add_request.assert_called_once_with(expected) def test_lb_create_lrp_assoc_handler_row_not_found(self): self.mock_get_nw.stop() self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'} }) self.helper.lb_create_lrp_assoc_handler(lrp) self.mock_add_request.assert_not_called() def test_lb_create_lrp_assoc(self): info = { 'network': self.network, 'router': self.router, 'is_gw_port': False, } self.helper.lb_create_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_called_once_with( self.network.load_balancer[0], self.router ) def test_lb_create_lrp_assoc_r_lb_exception(self): info = { 'network': self.network, 'router': self.router, 'is_gw_port': False, } self.helper._update_lb_to_ls_association.side_effect = [ idlutils.RowNotFound] with self.assertLogs(level='WARN') as cm: self.helper.lb_create_lrp_assoc(info) self.assertEqual( cm.output, ['WARNING:ovn_octavia_provider.helper:' 'The association of loadbalancer ' '%s to the logical switch %s failed, just keep going on' % (self.router.load_balancer[0].uuid, self.network.uuid)]) def test_lb_create_lrp_assoc_ls_by_step(self): self._update_lb_to_ls_association.stop() info = { 'network': self.network, 'router': self.router, 'is_gw_port': True, } self.helper._update_lb_to_lr_association.side_effect = [ idlutils.RowNotFound] self.helper.lb_create_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_called_once_with( self.network.load_balancer[0], self.router ) self.helper._update_lb_to_lr_association_by_step \ .assert_called_once_with( self.network.load_balancer[0], self.router) def test_lb_create_lrp_assoc_uniq_lb(self): info = { 'network': self.network, 'router': self.router, 'is_gw_port': True, } # Make it already uniq. 
self.network.load_balancer = self.router.load_balancer self.helper.lb_create_lrp_assoc(info) self.helper._update_lb_to_lr_association.assert_not_called() def test__find_lb_in_ls(self): net_lb = self.helper._find_lb_in_ls(self.network) for lb in self.network.load_balancer: self.assertIn(lb, net_lb) def test__find_lb_in_ls_wrong_ref(self): # lets break external_ids refs self.network.load_balancer[0].external_ids.update({ ovn_const.LB_EXT_IDS_LS_REFS_KEY: 'foo'}) net_lb = self.helper._find_lb_in_ls(self.network) for lb in self.network.load_balancer: self.assertNotIn(lb, net_lb) def test__find_ls_for_lr(self): p1 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': [], 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo1'}, 'networks': ["10.0.0.1/24"]}) p2 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': [], 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo2'}, 'networks': ["10.0.10.1/24"]}) self.router.ports.append(p1) self.router.ports.append(p2) res = self.helper._find_ls_for_lr(self.router, n_const.IP_VERSION_4) self.assertListEqual(['neutron-foo1', 'neutron-foo2'], res) def test__find_ls_for_lr_net_not_found(self): p1 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': [], 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo1'}, 'networks': ["10.0.0.1/24"]}) p2 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': [], 'external_ids': {}, 'networks': ["10.0.10.1/24"]}) self.router.ports.append(p2) self.router.ports.append(p1) res = self.helper._find_ls_for_lr(self.router, n_const.IP_VERSION_4) self.assertListEqual(['neutron-foo1'], res) def test__find_ls_for_lr_different_ip_version(self): p1 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': [], 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo1'}, 'networks': ["10.0.0.1/24"]}) p2 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': [], 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo2'}, 'networks': ["fdaa:4ad8:e8fb::/64"]}) self.router.ports.append(p2) self.router.ports.append(p1) res = self.helper._find_ls_for_lr(self.router, n_const.IP_VERSION_4) self.assertListEqual(['neutron-foo1'], res) res = self.helper._find_ls_for_lr(self.router, n_const.IP_VERSION_6) self.assertListEqual(['neutron-foo2'], res) def test__find_ls_for_lr_gw_port(self): p1 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': ['foo-gw-chassis'], 'ha_chassis_group': [], 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo1'}, 'networks': ["10.0.0.1/24"]}) self.router.ports.append(p1) result = self.helper._find_ls_for_lr(self.router, n_const.IP_VERSION_4) self.assertListEqual([], result) def test__find_ls_for_lr_gw_port_ha_chassis_group(self): p1 = fakes.FakeOVNPort.create_one_port(attrs={ 'gateway_chassis': [], 'ha_chassis_group': 'foo-chassis-group', 'external_ids': { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'foo1'}, 'networks': ["10.0.0.1/24"]}) self.router.ports.append(p1) result = self.helper._find_ls_for_lr(self.router, n_const.IP_VERSION_4) self.assertListEqual([], result) @mock.patch.object( ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association') @mock.patch.object( ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association') def test__get_lb_to_lr_association_commands(self, add, delete): self._get_lb_to_lr_association_commands.stop() self.helper._get_lb_to_lr_association_commands( 
self.ref_lb1, self.router) lr_ref = self.ref_lb1.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY) add.assert_called_once_with(self.ref_lb1, self.router, lr_ref, is_sync=False) delete.assert_not_called() @mock.patch.object( ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association') @mock.patch.object( ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association') def test__get_lb_to_lr_association_commands_delete(self, add, delete): self._get_lb_to_lr_association_commands.stop() self.helper._get_lb_to_lr_association_commands( self.ref_lb1, self.router, delete=True) lr_ref = self.ref_lb1.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY) add.assert_not_called() delete.assert_called_once_with(self.ref_lb1, self.router, lr_ref) @mock.patch.object( ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association') @mock.patch.object( ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association') def test__get_lb_to_lr_association_commands_by_step( self, add, delete): self._update_lb_to_lr_association_by_step.stop() self._get_lb_to_lr_association_commands.stop() self.helper._update_lb_to_lr_association_by_step( self.ref_lb1, self.router) lr_ref = self.ref_lb1.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY) add.assert_called_once_with(self.ref_lb1, self.router, lr_ref, is_sync=False) delete.assert_not_called() @mock.patch.object( ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association') @mock.patch.object( ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association') def test__get_lb_to_lr_association_commands_by_step_delete( self, add, delete): self._update_lb_to_lr_association_by_step.stop() self._get_lb_to_lr_association_commands.stop() self.helper._update_lb_to_lr_association_by_step( self.ref_lb1, self.router, delete=True) lr_ref = self.ref_lb1.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY) add.assert_not_called() delete.assert_called_once_with(self.ref_lb1, self.router, lr_ref) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__del_lb_to_lr_association(self, net_cli): lr_ref = self.ref_lb1.external_ids.get( ovn_const.LB_EXT_IDS_LR_REF_KEY) upd_lr_ref = '%s,%s' % (lr_ref, self.router.name) self.helper._del_lb_to_lr_association( self.ref_lb1, self.router, upd_lr_ref) expected_calls = [ mock.call.db_set( 'Load_Balancer', self.ref_lb1.uuid, (('external_ids', {ovn_const.LB_EXT_IDS_LR_REF_KEY: lr_ref}))), mock.call.lr_lb_del( self.router.uuid, self.ref_lb1.uuid, if_exists=True)] self.helper.ovn_nbdb_api.assert_has_calls( expected_calls) self.helper.ovn_nbdb_api.db_remove.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__del_lb_to_lr_association_no_lr_ref(self, net_cli): lr_ref = '' self.helper._del_lb_to_lr_association( self.ref_lb1, self.router, lr_ref) self.helper.ovn_nbdb_api.db_set.assert_not_called() self.helper.ovn_nbdb_api.db_remove.assert_not_called() self.helper.ovn_nbdb_api.lr_lb_del.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__del_lb_to_lr_association_lr_ref_empty_after(self, net_cli): lr_ref = self.router.name self.helper._del_lb_to_lr_association( self.ref_lb1, self.router, lr_ref) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, 'external_ids', ovn_const.LB_EXT_IDS_LR_REF_KEY) self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with( self.router.uuid, self.ref_lb1.uuid, if_exists=True) self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr') def 
test__del_lb_to_lr_association_from_ls(self, f_ls):
        # This tests that the LB is deleted from the Logical_Switches
        # associated with the Logical_Router ports.
        f_ls.return_value = ['neutron-xyz', 'neutron-qwr']
        self.helper._del_lb_to_lr_association(self.ref_lb1, self.router, '')
        self.helper.ovn_nbdb_api.ls_lb_del.assert_has_calls([
            (mock.call('neutron-xyz', self.ref_lb1.uuid, if_exists=True)),
            (mock.call('neutron-qwr', self.ref_lb1.uuid, if_exists=True))])

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
    def test__add_lb_to_lr_association(self, f_ls):
        lr_ref = 'foo'
        f_ls.return_value = ['neutron-xyz', 'neutron-qwr']
        self.helper._add_lb_to_lr_association(
            self.ref_lb1, self.router, lr_ref)
        self.helper.ovn_nbdb_api.lr_lb_add.assert_called_once_with(
            self.router.uuid, self.ref_lb1.uuid, may_exist=True)
        self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([
            (mock.call('neutron-xyz', self.ref_lb1.uuid, may_exist=True)),
            (mock.call('neutron-qwr', self.ref_lb1.uuid, may_exist=True))])
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', {'lr_ref': 'foo,%s' % self.router.name}))

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
    def test__add_lb_to_lr_association_sync_lb_not_in_lr(self, f_ls):
        lr_ref = 'foo'
        f_ls.return_value = ['neutron-%s' % self.network.uuid,
                             'neutron-%s' % self.network2.uuid]
        self.router.load_balancer = []
        self.helper._add_lb_to_lr_association(
            self.ref_lb1, self.router, lr_ref, is_sync=True)
        self.helper.ovn_nbdb_api.lr_lb_add.assert_called_once_with(
            self.router.uuid, self.ref_lb1.uuid, may_exist=True)
        self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([
            (mock.call('neutron-%s' % self.network.uuid,
                       self.ref_lb1.uuid, may_exist=True)),
            (mock.call('neutron-%s' % self.network2.uuid,
                       self.ref_lb1.uuid, may_exist=True))])
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', {'lr_ref': 'foo,%s' % self.router.name}))

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
    def test__add_lb_to_lr_association_sync_lb_in_lr(self, f_ls):
        lr_ref = 'foo'
        f_ls.return_value = ['neutron-%s' % self.network.uuid,
                             'neutron-%s' % self.network2.uuid]
        self.router.load_balancer = [self.ref_lb1]
        self.helper._add_lb_to_lr_association(
            self.ref_lb1, self.router, lr_ref, is_sync=True)
        self.helper.ovn_nbdb_api.lr_lb_add.assert_not_called()
        self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([
            (mock.call('neutron-%s' % self.network.uuid,
                       self.ref_lb1.uuid, may_exist=True)),
            (mock.call('neutron-%s' % self.network2.uuid,
                       self.ref_lb1.uuid, may_exist=True))])
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid,
            ('external_ids', {'lr_ref': 'foo,%s' % self.router.name}))

    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
    def test__add_lb_to_lr_association_sync_lb_in_ls(self, f_ls):
        lr_ref = 'foo'
        f_ls.return_value = ['neutron-%s' % self.network.uuid, ]
        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
return_value) = self.network self.router.load_balancer = [self.ref_lb2] self.helper._add_lb_to_lr_association( self.ref_lb2, self.router, lr_ref, is_sync=True) self.helper.ovn_nbdb_api.lr_lb_add.assert_not_called() self.helper.ovn_nbdb_api.ls_lb_add.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb2.uuid, ('external_ids', {'lr_ref': 'foo,%s' % self.router.name})) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr') def test__add_lb_to_lr_association_sync_ls_no_found_exception(self, f_ls): lr_ref = 'foo' f_ls.return_value = ['neutron-%s' % self.network.uuid, ] (self.helper.ovn_nbdb_api.ls_get.return_value.execute. side_effect) = [idlutils.RowNotFound] self.router.load_balancer = [self.ref_lb2] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.helper._add_lb_to_lr_association( self.ref_lb2, self.router, lr_ref, is_sync=True) m_l.warning.assert_called_once_with( "LogicalSwitch %s could not be found.", "neutron-%s" % self.network.uuid) self.helper.ovn_nbdb_api.lr_lb_add.assert_not_called() self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([ (mock.call('neutron-%s' % self.network.uuid, self.ref_lb2.uuid, may_exist=True))]) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb2.uuid, ('external_ids', {'lr_ref': 'foo,%s' % self.router.name})) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr') def test__add_lb_to_lr_association_lr_already_associated(self, f_ls): self.ref_lb1.external_ids.update({ ovn_const.LB_EXT_IDS_LR_REF_KEY: self.router.name}) lr_ref = self.ref_lb1.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) f_ls.return_value = ['neutron-xyz', 'neutron-qwr'] self.helper._add_lb_to_lr_association( self.ref_lb1, self.router, lr_ref) self.helper.ovn_nbdb_api.lr_lb_add.assert_called_once_with( self.router.uuid, self.ref_lb1.uuid, may_exist=True) self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([ (mock.call('neutron-xyz', self.ref_lb1.uuid, may_exist=True)), (mock.call('neutron-qwr', self.ref_lb1.uuid, may_exist=True))]) self.helper.ovn_nbdb_api.db_set.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr') def test__add_lb_to_lr_association_no_lr_rf(self, f_ls): lr_ref = '' f_ls.return_value = ['neutron-xyz', 'neutron-qwr'] self.helper._add_lb_to_lr_association( self.ref_lb1, self.router, lr_ref) self.helper.ovn_nbdb_api.lr_lb_add.assert_called_once_with( self.router.uuid, self.ref_lb1.uuid, may_exist=True) self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([ (mock.call('neutron-xyz', self.ref_lb1.uuid, may_exist=True)), (mock.call('neutron-qwr', self.ref_lb1.uuid, may_exist=True))]) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', {'lr_ref': '%s' % self.router.name})) def test__extract_listener_key_value(self): self.assertEqual( (None, None), self.helper._extract_listener_key_value('listener')) self.assertEqual( ('listener', '123'), self.helper._extract_listener_key_value('listener:123')) def test__find_lr_of_ls(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', 'neutron:cidrs': '10.10.10.1/24', ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-foo-name'}, }) lsp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router2', 'neutron:cidrs': '10.10.10.2/24', 
ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-bar-name'}, }) lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'name': 'lrp-foo-name', }) lr = fakes.FakeOVNRouter.create_one_router( attrs={ 'name': 'router1', 'ports': [lrp]}) lrp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'name': 'lrp-foo-name2', }) lr2 = fakes.FakeOVNRouter.create_one_router( attrs={ 'name': 'router2', 'ports': [lrp2]}) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp2, lsp]}) (self.helper.ovn_nbdb_api.get_lrs.return_value. execute.return_value) = [lr2, lr] returned_lr = self.helper._find_lr_of_ls(ls, '10.10.10.1') self.assertEqual(lr, returned_lr) def test__find_lr_of_ls_multiple_address_ipv4(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', 'neutron:cidrs': ( '10.10.10.1/24 10.10.20.1/24' ), ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-foo-name'}, }) lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'name': 'lrp-foo-name', }) lr = fakes.FakeOVNRouter.create_one_router( attrs={ 'name': 'router1', 'ports': [lrp]}) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp]}) (self.helper.ovn_nbdb_api.get_lrs.return_value. execute.return_value) = [lr] returned_lr = self.helper._find_lr_of_ls(ls, '10.10.20.1') self.assertEqual(lr, returned_lr) def test__find_lr_of_ls_multiple_address_ipv6(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', 'neutron:cidrs': ( 'fd61:5fe4:978c:a334:0:3eff:24ab:f816/64 ' 'fd8b:8a01:ab1d:0:f816:3eff:fe3d:24ab/64' ), ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-foo-name'}, }) lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'name': 'lrp-foo-name', }) lr = fakes.FakeOVNRouter.create_one_router( attrs={ 'name': 'router1', 'ports': [lrp]}) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp]}) (self.helper.ovn_nbdb_api.get_lrs.return_value. execute.return_value) = [lr] returned_lr = self.helper._find_lr_of_ls( ls, 'fd61:5fe4:978c:a334:0:3eff:24ab:f816') self.assertEqual(lr, returned_lr) def test__find_lr_of_ls_no_lrs(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', 'neutron:cidrs': '10.10.10.1/24', ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-foo-name'}, }) lsp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router2', 'neutron:cidrs': '10.10.10.2/24', ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-bar-name'}, }) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp2, lsp]}) (self.helper.ovn_nbdb_api.get_lrs.return_value. 
execute.return_value) = [] returned_lr = self.helper._find_lr_of_ls(ls, '10.10.10.1') self.assertIsNone(returned_lr) def test__find_lr_of_ls_gw_port_id(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': 'lrp-lrp-foo-name'} }) lr = fakes.FakeOVNRouter.create_one_router( attrs={ 'name': 'router1', 'ports': [], 'external_ids': { 'neutron:gw_port_id': 'lrp-foo-name'}}) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp]}) (self.helper.ovn_nbdb_api.get_lrs.return_value. execute.return_value) = [lr] returned_lr = self.helper._find_lr_of_ls(ls) self.assertEqual(lr, returned_lr) def test__find_lr_of_ls_no_lrp_name(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'router', 'options': { 'router-port': None} }) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp]}) returned_lr = self.helper._find_lr_of_ls(ls) self.assertIsNone(returned_lr) def test__find_lr_of_ls_no_router_type_port(self): lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: n_const.DEVICE_OWNER_ROUTER_INTF}, 'type': 'foo', 'options': { 'router-port': None} }) ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [lsp]}) returned_lr = self.helper._find_lr_of_ls(ls) self.assertIsNone(returned_lr) def test__find_lr_of_ls_no_lrp(self): ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': []}) returned_lr = self.helper._find_lr_of_ls(ls) (self.helper.ovn_nbdb_api.tables['Logical_Router'].rows. 
values.assert_not_called())
        self.assertIsNone(returned_lr)

    def test__get_lb_to_ls_association_command_empty_network_and_subnet(self):
        self._get_lb_to_ls_association_commands.stop()
        returned_commands = self.helper._get_lb_to_ls_association_commands(
            self.ref_lb1, associate=True, update_ls_ref=True)
        self.assertListEqual(returned_commands, [])

    def test__get_members_in_ovn_lb_no_members(self):
        self.ovn_lb.external_ids = {}
        result = self.helper._get_members_in_ovn_lb(self.ovn_lb, None)
        self.assertEqual([], result)

    def test__get_member_info(self):
        fake_member = fakes.FakeMember(
            uuid=self.member['id'],
            member_id=self.member['id'],
            admin_state_up=True,
            name='member_2',
            project_id=self.project_id,
            address=self.member['address'],
            protocol_port=self.member['protocol_port'],
            subnet_id=self.member['subnet_id'])
        result = (
            ovn_const.LB_EXT_IDS_MEMBER_PREFIX + fake_member.member_id +
            '_' + fake_member.address + ':' + fake_member.protocol_port +
            '_' + fake_member.subnet_id)
        self.assertEqual(
            result,
            self.helper._get_member_info(fake_member))
        result = (
            ovn_const.LB_EXT_IDS_MEMBER_PREFIX + self.member['id'] + '_' +
            self.member['address'] + ':' + self.member['protocol_port'] +
            '_' + self.member['subnet_id'])
        self.assertEqual(
            result,
            self.helper._get_member_info(self.member))
        self.assertEqual('', self.helper._get_member_info(None))

    def test__update_lb_to_ls_association_network(self):
        self._update_lb_to_ls_association.stop()
        self._get_lb_to_ls_association_commands.stop()
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=True,
            update_ls_ref=True)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        ls_refs = {'ls_refs': '{"%s": 2}' % self.network.name}
        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
            'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs))

    def test__update_lb_to_ls_association_network_sync(self):
        self._update_lb_to_ls_association.stop()
        self._get_lb_to_ls_association_commands.stop()
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=True,
            update_ls_ref=True, is_sync=True)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        self.helper.ovn_nbdb_api.db_set.assert_not_called()

    def test__update_lb_to_ls_association_network_no_update_ls_ref(self):
        self._update_lb_to_ls_association.stop()
        self._get_lb_to_ls_association_commands.stop()
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=True,
            update_ls_ref=False)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        self.helper.ovn_nbdb_api.db_set.assert_not_called()

    def test__update_lb_to_ls_association_network_no_assoc_no_update_ls_ref(
            self):
        self._update_lb_to_ls_association.stop()
        self._get_lb_to_ls_association_commands.stop()
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=False,
            update_ls_ref=False)
        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
            self.network.name)
        self.helper.ovn_nbdb_api.db_set.assert_not_called()

    def test__update_lb_to_ls_association_network_no_assoc_update_ls_ref(
            self):
        self._update_lb_to_ls_association.stop()
        self._get_lb_to_ls_association_commands.stop()
        self.helper._update_lb_to_ls_association(
            self.ref_lb1, network_id=self.network.uuid, associate=False,
            update_ls_ref=True)
        self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with(
            self.network.uuid, self.ref_lb1.uuid, if_exists=True)
        ls_refs = {'ls_refs': '{}'}
self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs)) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__update_lb_to_ls_association_subnet(self, net_cli): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() subnet = fakes.FakeSubnet.create_one_subnet( attrs={'id': 'foo_subnet_id', 'name': 'foo_subnet_name', 'network_id': 'foo_network_id'}) net_cli.return_value.get_subnet.return_value = subnet self.helper._update_lb_to_ls_association( self.ref_lb1, subnet_id=subnet.id, associate=True, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( 'neutron-foo_network_id') def test__update_lb_to_ls_association_empty_ls_refs_additional_vips(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.ref_lb1.external_ids.pop('ls_refs') self.ref_lb1.external_ids.update({ ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: 'foo, anotherfoo'}) self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, update_ls_ref=True, additional_vips=True) self.helper.ovn_nbdb_api.ls_lb_add.assert_called_once_with( self.network.uuid, self.ref_lb1.uuid, may_exist=True) ls_refs = {'ls_refs': '{"%s": 3}' % self.network.name} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs)) def test__update_lb_to_ls_association_empty_ls_refs(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.ref_lb1.external_ids.pop('ls_refs') self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, update_ls_ref=True, additional_vips=True) self.helper.ovn_nbdb_api.ls_lb_add.assert_called_once_with( self.network.uuid, self.ref_lb1.uuid, may_exist=True) ls_refs = {'ls_refs': '{"%s": 1}' % self.network.name} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs)) def test__update_lb_to_ls_association_empty_ls_refs_no_ls(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = None self.ref_lb1.external_ids.pop('ls_refs') self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, update_ls_ref=False) self.helper.ovn_nbdb_api.ls_lb_add.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_not_called() def test__update_lb_to_ls_association_no_ls(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. side_effect) = [idlutils.RowNotFound] returned_commands = self.helper._get_lb_to_ls_association_commands( self.ref_lb1, network_id=self.network.uuid, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( self.network.name) self.assertListEqual([], returned_commands) def test__update_lb_to_ls_association_network_disassociate(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. 
return_value) = self.network self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, associate=False, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( self.network.name) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', {'ls_refs': '{}'})) self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with( self.network.uuid, self.ref_lb1.uuid, if_exists=True) def test__update_lb_to_ls_association_net_disassoc_no_update_ls_ref(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, associate=False, update_ls_ref=False) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( self.network.name) self.helper.ovn_nbdb_api.db_set.assert_not_called() self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with( self.network.uuid, self.ref_lb1.uuid, if_exists=True) def test__update_lb_to_ls_association_dissasoc_net_not_assoc(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.helper._update_lb_to_ls_association( self.ref_lb1, network_id='foo', associate=False, update_ls_ref=False) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( 'neutron-foo') self.helper.ovn_nbdb_api.db_set.assert_not_called() self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with( self.network.uuid, self.ref_lb1.uuid, if_exists=True) def test__update_lb_to_ls_association_net_ls_ref_wrong_format(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.ref_lb1.external_ids.update({ ovn_const.LB_EXT_IDS_LS_REFS_KEY: '{\"neutron-%s\"}'}) self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, associate=False, update_ls_ref=False) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( self.network.name) self.helper.ovn_nbdb_api.db_set.assert_not_called() def test__update_lb_to_ls_association_network_dis_ls_not_found(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. side_effect) = [idlutils.RowNotFound] self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, associate=False, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( self.network.name) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', {'ls_refs': '{}'})) self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__update_lb_to_ls_association_network_dis_net_not_found( self, net_cli): net_cli.return_value.get_subnet.side_effect = ( openstack.exceptions.ResourceNotFound) self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. 
return_value) = self.network self.helper._update_lb_to_ls_association( self.ref_lb1, subnet_id='foo', associate=False, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_get.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_not_called() self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called() def test__update_lb_to_ls_association_disassoc_ls_not_in_ls_refs(self): self._update_lb_to_ls_association.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network self.ref_lb1.external_ids.pop('ls_refs') self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, associate=False, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called() self.helper.ovn_nbdb_api.db_set.assert_not_called() def test__update_lb_to_ls_association_disassoc_multiple_refs(self): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() (self.helper.ovn_nbdb_api.ls_get.return_value.execute. return_value) = self.network # multiple refs ls_refs = {'ls_refs': '{"%s": 2}' % self.network.name} self.ref_lb1.external_ids.update(ls_refs) self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid, associate=False, update_ls_ref=True) self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( self.network.name) exp_ls_refs = {'ls_refs': '{"%s": 1}' % self.network.name} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', exp_ls_refs)) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test__update_lb_to_ls_association_retry(self, execute): self._update_lb_to_ls_association.stop() self._get_lb_to_ls_association_commands.stop() self.helper._update_lb_to_ls_association( self.ref_lb1, network_id=self.network.uuid) expected = self.helper._get_lb_to_ls_association_commands( self.ref_lb1, network_id=self.network.uuid) execute.assert_called_once_with(expected) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test__update_lb_to_ls_association_retry_failed(self, execute): execute.side_effect = [idlutils.RowNotFound for _ in range(4)] self._update_lb_to_ls_association.stop() self.assertRaises( idlutils.RowNotFound, self.helper._update_lb_to_ls_association, self.ref_lb1, network_id=self.network.uuid) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_switch_port_update_event_vip_port_associate(self, net_cli): self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) fake_port = fakes.FakePort.create_one_port() net_cli.return_value.get_port.return_value = fake_port port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo') fip = '10.0.0.1' attrs = { 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: fip}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = { 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) expected_call = { 'info': {'action': 'associate', 'vip_related': [fake_port.fixed_ips[0]['ip_address']], 'additional_vip_fip': False, 'vip_fip': fip, 'ovn_lb': self.ovn_lb}, 'type': 'handle_vip_fip'} self.mock_add_request.assert_called_once_with(expected_call) def test_logical_switch_port_update_event_missing_port_name(self): self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) attrs = {'external_ids': {}} row = 
fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) self.assertFalse(self.switch_port_event.match_fn( mock.ANY, row, mock.ANY)) self.mock_add_request.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_logical_switch_port_update_event_disassociate(self, net_cli): self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) fake_port = fakes.FakePort.create_one_port() net_cli.return_value.get_port.return_value = fake_port port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo') fip = '172.24.4.4' attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: fip}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) expected_call = { 'info': {'action': 'disassociate', 'vip_fip': fip, 'additional_vip_fip': False, 'vip_related': [fake_port.fixed_ips[0]['ip_address']], 'ovn_lb': self.ovn_lb}, 'type': 'handle_vip_fip'} self.mock_add_request.assert_called_once_with(expected_call) def test_logical_switch_port_update_event_update_unrelated(self): self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo') fip = '172.24.4.4' attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: fip}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: fip}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) self.mock_add_request.assert_not_called() def test_logical_switch_port_update_event_without_external_ids(self): self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) attrs = {} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) self.switch_port_event.run(mock.ANY, row, old) self.mock_add_request.assert_not_called() def test_logical_switch_port_update_event_wrong_vip_port_name(self): self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) port_name = 'foo' row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: 'foo'}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.assertFalse(self.switch_port_event.match_fn(mock.ANY, row, old)) self.mock_add_request.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_delete_port_not_found(self, net_cli): net_cli.return_value.delete_port.side_effect = ( [openstack.exceptions.ResourceNotFound]) self.helper.delete_port('foo') @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_find_ovn_lbs') def test_vip_port_update_handler_lb_not_found(self, lb): lb.side_effect = [idlutils.RowNotFound for _ in range(5)] self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo') attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '172.24.4.40'}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) self.mock_add_request.assert_not_called() @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_vip_port_update_handler_no_port(self, net_cli, lb): lb1 = mock.MagicMock() lb.return_value = [lb1] self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) net_cli.return_value.get_port.return_value = None port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo') attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '172.24.4.40'}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) expected = { 'type': 'handle_vip_fip', 'info': { 'action': ovn_const.REQ_INFO_ACTION_DISASSOCIATE, 'vip_fip': '172.24.4.40', 'vip_related': [], 'additional_vip_fip': False, 'ovn_lb': lb1 } } self.mock_add_request.assert_called_once_with(expected) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test__update_lb_to_lr_association_retry(self, execute): self._update_lb_to_lr_association.stop() self._get_lb_to_lr_association_commands.stop() self.helper._update_lb_to_lr_association(self.ref_lb1, self.router) expected = self.helper._get_lb_to_lr_association_commands( self.ref_lb1, self.router) execute.assert_called_once_with(expected) @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') def test__update_lb_to_lr_association_retry_failed(self, execute): execute.side_effect = [idlutils.RowNotFound for _ in range(4)] self._update_lb_to_lr_association.stop() self.assertRaises( idlutils.RowNotFound, self.helper._update_lb_to_lr_association, self.ref_lb1, self.router) def test__update_lb_to_lr_association_by_step(self): self._get_lb_to_lr_association_commands.stop() self._update_lb_to_lr_association_by_step.stop() self.helper._update_lb_to_lr_association_by_step( self.network.load_balancer[0], self.router) self.helper.ovn_nbdb_api.db_set.assert_called() self.helper.ovn_nbdb_api.lr_lb_add.assert_called() def test__update_lb_to_lr_association_by_step_exception_raise( self): self._get_lb_to_lr_association_commands.stop() self._update_lb_to_lr_association_by_step.stop() (self.helper.ovn_nbdb_api.db_set.return_value.execute. side_effect) = [idlutils.RowNotFound] self.assertRaises( idlutils.RowNotFound, self.helper._update_lb_to_lr_association_by_step, self.network.load_balancer[0], self.router) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_find_ovn_lbs_with_retry') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_vip_port_update_handler_multiple_lbs(self, net_cli, lb): lb1 = mock.MagicMock() lb2 = mock.MagicMock() lb.return_value = [lb1, lb2] self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) fake_port = fakes.FakePort.create_one_port() net_cli.return_value.get_port.return_value = fake_port port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo') attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '172.24.4.40'}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) def expected_call(lb): return {'type': 'handle_vip_fip', 'info': {'action': ovn_const.REQ_INFO_ACTION_DISASSOCIATE, 'vip_fip': '172.24.4.40', 'vip_related': [fake_port.fixed_ips[0]['ip_address']], 'additional_vip_fip': False, 'ovn_lb': lb}} self.mock_add_request.assert_has_calls([ mock.call(expected_call(lb1)), mock.call(expected_call(lb2))]) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_vip_port_update_handler_additional_vip_dissasociate(self, net_cli, lb): lb1 = mock.MagicMock() lb.return_value = [lb1] fip = '10.0.0.123' fake_port = fakes.FakePort.create_one_port() net_cli.return_value.get_port.return_value = fake_port self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) port_name = '%s%s' % (ovn_const.LB_VIP_ADDIT_PORT_PREFIX, '1-foo') attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: fip}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) expected = { 'type': 'handle_vip_fip', 'info': { 'action': ovn_const.REQ_INFO_ACTION_DISASSOCIATE, 'vip_fip': fip, 'vip_related': [fake_port.fixed_ips[0]['ip_address']], 'additional_vip_fip': True, 'ovn_lb': lb1}} self.mock_add_request.assert_has_calls([mock.call(expected)]) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_find_ovn_lbs') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_vip_port_update_handler_additional_vip_associate(self, net_cli, lb): lb1 = mock.MagicMock() lb.return_value = [lb1] fake_port = fakes.FakePort.create_one_port() net_cli.return_value.get_port.return_value = fake_port self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) port_name = '%s%s' % (ovn_const.LB_VIP_ADDIT_PORT_PREFIX, '1-foo') attrs = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name, ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '10.0.0.99'}} row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs) attrs_old = {'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}} old = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=attrs_old) self.switch_port_event.run(mock.ANY, row, old) expected = { 'type': 'handle_vip_fip', 'info': { 'action': ovn_const.REQ_INFO_ACTION_ASSOCIATE, 'vip_fip': '10.0.0.99', 'vip_related': [fake_port.fixed_ips[0]['ip_address']], 'additional_vip_fip': True, 'ovn_lb': lb1}} self.mock_add_request.assert_has_calls([mock.call(expected)]) lb1 = mock.MagicMock() vip_fip = '10.0.0.123' external_ids = { ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: '172.26.21.20', ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: vip_fip} lb1.external_ids = external_ids lb.return_value = [lb1] self.switch_port_event = ovn_event.LogicalSwitchPortUpdateEvent( self.helper) self.switch_port_event.run(mock.ANY, row, old) self.mock_add_request.reset() self.mock_add_request.assert_has_calls([ mock.call(expected)]) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') def test_handle_vip_fip_disassociate(self, flb): lb = mock.MagicMock() vip_fip = '10.0.0.123' external_ids = { 'neutron:vip': '172.26.21.20', 'neutron:vip_fip': vip_fip} lb.external_ids = external_ids lb_hc = mock.MagicMock() lb_hc.uuid = "fake_lb_hc_vip" lb_hc.vip = "{}:80".format('172.26.21.20') lb_hc.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: 'foo', ovn_const.LB_EXT_IDS_HM_POOL_KEY: 'pool_foo', ovn_const.LB_EXT_IDS_HM_VIP: '172.26.21.20'} lb_hc_fip = mock.MagicMock() lb_hc_fip.uuid = "fake_lb_hc_fip" lb_hc_fip.vip = "{}:80".format(vip_fip) lb_hc_fip.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: 'foo', ovn_const.LB_EXT_IDS_HM_POOL_KEY: 'pool_foo', ovn_const.LB_EXT_IDS_HM_VIP: vip_fip} lb.health_check = [lb_hc, lb_hc_fip] fip_info = { 'action': 'disassociate', 'vip_fip': vip_fip, 'vip_related': ['172.26.21.20'], 'ovn_lb': lb} flb.return_value = lb self.helper.handle_vip_fip(fip_info) calls = [ mock.call.db_remove( 'Load_Balancer', lb.uuid, 'external_ids', 'neutron:vip_fip'), mock.call.db_remove( 'Load_Balancer', lb.uuid, 'health_check', lb_hc_fip.uuid), mock.call.db_destroy('Load_Balancer_Health_Check', lb_hc_fip.uuid), mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'), mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))] self.helper.ovn_nbdb_api.assert_has_calls(calls) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_find_ovn_lbs')
    def test_handle_vip_fip_disassociate_no_lbhc(self, flb):
        lb = mock.MagicMock()
        vip_fip = '10.0.0.123'
        external_ids = {
            'neutron:vip': '172.26.21.20',
            'neutron:vip_fip': vip_fip}
        lb.external_ids = external_ids
        lb.health_check = []
        fip_info = {
            'action': 'disassociate',
            'vip_fip': vip_fip,
            'ovn_lb': lb}
        flb.return_value = lb
        self.helper.handle_vip_fip(fip_info)
        calls = [
            mock.call.db_remove(
                'Load_Balancer', lb.uuid, 'external_ids',
                'neutron:vip_fip'),
            mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'),
            mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))]
        self.helper.ovn_nbdb_api.assert_has_calls(calls)

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_handle_vip_fip_disassociate_no_matching_lbhc(self, flb):
        lb = mock.MagicMock()
        vip_fip = '10.0.0.123'
        external_ids = {
            'neutron:vip': '172.26.21.20',
            'neutron:vip_fip': vip_fip}
        lb.external_ids = external_ids
        lb_hc = mock.MagicMock()
        lb_hc.uuid = "fake_lb_hc"
        lb_hc.vip = "10.0.0.222:80"
        # Keep a health check whose VIP does not match the disassociated
        # FIP, so no Load_Balancer_Health_Check removal is expected.
        lb.health_check = [lb_hc]
        fip_info = {
            'action': 'disassociate',
            'vip_fip': vip_fip,
            'ovn_lb': lb}
        flb.return_value = lb
        self.helper.handle_vip_fip(fip_info)
        calls = [
            mock.call.db_remove(
                'Load_Balancer', lb.uuid, 'external_ids',
                'neutron:vip_fip'),
            mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'),
            mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))]
        self.helper.ovn_nbdb_api.assert_has_calls(calls)

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_handle_vip_fip_sync(self, fb):
        lb = mock.MagicMock()
        fip_info = {
            'action': 'sync',
            'vip_fip': '10.0.0.123',
            'ovn_lb': lb}
        members = 'member_%s_%s:%s_%s' % (self.member_id,
                                          self.member_address,
                                          self.member_port,
                                          self.member_subnet_id)
        external_ids = {
            'listener_foo': '80:pool_%s' % self.pool_id,
            'pool_%s' % self.pool_id: members,
            'neutron:vip': '172.26.21.20'}
        lb.external_ids = external_ids
        fb.return_value = lb
        self.helper.handle_vip_fip(fip_info)
        expected_db_set_calls = [
            mock.call('Load_Balancer', lb.uuid,
                      ('external_ids', {'neutron:vip_fip': '10.0.0.123'})),
            mock.call('Load_Balancer', lb.uuid,
                      ('vips', {'10.0.0.123:80': '192.168.2.149:1010',
                                '172.26.21.20:80': '192.168.2.149:1010'}))
        ]
        self.helper.ovn_nbdb_api.db_set.assert_has_calls(
            expected_db_set_calls)
        self.helper.ovn_nbdb_api.db_clear.assert_called_once_with(
            'Load_Balancer', lb.uuid, 'vips')

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
    def test_handle_vip_fip_sync_same_ext(self, fb):
        lb = mock.MagicMock()
        fip_info = {
            'action': 'sync',
            'vip_fip': '10.0.0.123',
            'ovn_lb': lb}
        members = 'member_%s_%s:%s_%s' % (self.member_id,
                                          self.member_address,
                                          self.member_port,
                                          self.member_subnet_id)
        external_ids = {
            'listener_foo': '80:pool_%s' % self.pool_id,
            'pool_%s' % self.pool_id: members,
            'neutron:vip': '172.26.21.20',
            'neutron:vip_fip': '10.0.0.123',
        }
        lb.external_ids = external_ids
        fb.return_value = lb
        self.helper.handle_vip_fip(fip_info)
        expected_db_set_calls = [
            mock.call('Load_Balancer', lb.uuid,
                      ('vips', {'10.0.0.123:80': '192.168.2.149:1010',
                                '172.26.21.20:80': '192.168.2.149:1010'}))
        ]
        self.helper.ovn_nbdb_api.db_set.assert_has_calls(
            expected_db_set_calls)
        self.helper.ovn_nbdb_api.db_clear.assert_called_once_with(
            'Load_Balancer', lb.uuid, 'vips')

    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
'_find_ovn_lbs') def test_handle_vip_fip_sync_hm_exist(self, fb): lb = mock.MagicMock() vip_fip = '10.0.0.123' fip_info = { 'action': 'sync', 'vip_fip': '10.0.0.123', 'ovn_lb': lb} members = 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id) external_ids = { 'listener_foo': '80:pool_%s' % self.pool_id, 'pool_%s' % self.pool_id: members, 'neutron:vip': '172.26.21.20', 'neutron:vip_fip': '10.0.0.123', } lb_hc_fip = mock.MagicMock() lb_hc_fip.uuid = "fake_lb_hc_fip" lb_hc_fip.vip = f"{vip_fip}:80" lb_hc_fip.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: 'foo', ovn_const.LB_EXT_IDS_HM_POOL_KEY: 'pool_foo', ovn_const.LB_EXT_IDS_HM_VIP: vip_fip} lb.health_check = [lb_hc_fip] lb.vips = { '172.26.21.20:80': '192.168.2.149:1010', '10.0.0.123:80': '192.168.2.149:1010' } lb.external_ids = external_ids fb.return_value = lb self.helper.handle_vip_fip(fip_info) self.helper.ovn_nbdb_api.db_set.assert_not_called() self.helper.ovn_nbdb_api.db_clear.assert_not_called() def test_get_lsp(self): self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] port_id = '664224ab-a7c4-4918-8d81-0712947eb600' network_id = 'c75cb020-f789-432e-b50e-89a20e5e463d' with mock.patch.object(ovn_helper, 'LOG') as m_l: self.assertIsNone(self.helper.get_lsp(port_id=port_id, network_id=network_id)) m_l.warn.assert_called_once_with( f'Logical Switch neutron-{network_id} not found.') def test_get_lsp_port_not_found(self): self.helper.ovn_nbdb_api.lookup.return_value = mock.MagicMock(ports=[]) port_id = '664224ab-a7c4-4918-8d81-0712947eb600' network_id = 'c75cb020-f789-432e-b50e-89a20e5e463d' self.assertIsNone(self.helper.get_lsp(port_id=port_id, network_id=network_id)) def test_get_lsp_port_found(self): port_id = '664224ab-a7c4-4918-8d81-0712947eb600' network_id = 'c75cb020-f789-432e-b50e-89a20e5e463d' port = mock.MagicMock() port.name = port_id self.helper.ovn_nbdb_api.lookup.return_value = mock.MagicMock( ports=[port]) self.assertEqual(self.helper.get_lsp( port_id=port_id, network_id=network_id), port) def test_get_lsp_port_found_many_ports(self): port_id = '664224ab-a7c4-4918-8d81-0712947eb600' network_id = 'c75cb020-f789-432e-b50e-89a20e5e463d' port = mock.MagicMock() port.name = port_id port2 = mock.MagicMock() port2.name = 'foo' self.helper.ovn_nbdb_api.lookup.return_value = mock.MagicMock( ports=[port2, port]) self.assertEqual(self.helper.get_lsp( port_id=port_id, network_id=network_id), port) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') def test_handle_vip_fip_associate(self, fb): lb = mock.MagicMock() fip_info = { 'action': 'associate', 'vip_fip': '10.0.0.123', 'ovn_lb': lb} members = 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id) external_ids = { 'listener_foo': '80:pool_%s' % self.pool_id, 'pool_%s' % self.pool_id: members, 'neutron:vip': '172.26.21.20'} lb.external_ids = external_ids fb.return_value = lb self.helper.handle_vip_fip(fip_info) expected_db_set_calls = [ mock.call('Load_Balancer', lb.uuid, ('external_ids', {'neutron:vip_fip': '10.0.0.123'})), mock.call('Load_Balancer', lb.uuid, ('vips', {'10.0.0.123:80': '192.168.2.149:1010', '172.26.21.20:80': '192.168.2.149:1010'})) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_db_set_calls) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', lb.uuid, 'vips') @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_find_ovn_lbs') def test_handle_vip_fip_additional_vip_fip_disassociate(self, flb): lb = mock.MagicMock() vip_fip = '10.0.0.123' external_ids = { ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: '172.26.21.20', ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: vip_fip} lb.external_ids = external_ids lb_hc = mock.MagicMock() lb_hc.uuid = "fake_lb_hc_vip" lb_hc.vip = "{}:80".format('172.26.21.20') lb_hc.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: 'foo', ovn_const.LB_EXT_IDS_HM_POOL_KEY: 'pool_foo', ovn_const.LB_EXT_IDS_HM_VIP: '172.26.21.20'} lb_hc_fip = mock.MagicMock() lb_hc_fip.uuid = "fake_lb_hc_fip" lb_hc_fip.vip = "{}:80".format(vip_fip) lb_hc_fip.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: 'foo', ovn_const.LB_EXT_IDS_HM_POOL_KEY: 'pool_foo', ovn_const.LB_EXT_IDS_HM_VIP: vip_fip} lb.health_check = [lb_hc, lb_hc_fip] fip_info = { 'action': 'disassociate', 'vip_fip': vip_fip, 'vip_related': ['172.26.21.20'], 'additional_vip_fip': True, 'ovn_lb': lb} flb.return_value = lb self.helper.handle_vip_fip(fip_info) calls = [ mock.call.db_remove( 'Load_Balancer', lb.uuid, 'external_ids', ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY), mock.call.db_remove( 'Load_Balancer', lb.uuid, 'health_check', lb_hc_fip.uuid), mock.call.db_destroy('Load_Balancer_Health_Check', lb_hc_fip.uuid), mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'), mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))] self.helper.ovn_nbdb_api.assert_has_calls(calls) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') def test_handle_vip_fip_additional_vip_fip_associate(self, fb): lb = mock.MagicMock() fip_info = { 'action': 'associate', 'vip_fip': '10.0.0.123', 'vip_related': ['172.26.21.20'], 'additional_vip_fip': True, 'ovn_lb': lb} members = 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id) external_ids = { 'listener_foo': '80:pool_%s' % self.pool_id, 'pool_%s' % self.pool_id: members, 'neutron:vip': '172.26.21.20', ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY: '172.25.21.20'} ovn_hm = mock.MagicMock() ovn_hm.uuid = self.healthmonitor_id ovn_hm.vip = '172.26.21.20:80' ovn_hm.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: ovn_hm.uuid, ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id, ovn_const.LB_EXT_IDS_HM_VIP: '172.26.21.20'} ovn_hm_addi = mock.MagicMock() ovn_hm_addi.uuid = self.healthmonitor_id ovn_hm_addi.vip = '172.25.21.20:80' ovn_hm_addi.external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: ovn_hm_addi.uuid, ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id, ovn_const.LB_EXT_IDS_HM_VIP: '172.25.21.20'} lb.health_check = [ovn_hm, ovn_hm_addi] lb.external_ids = external_ids fb.return_value = lb self.helper.handle_vip_fip(fip_info) expected_db_create_calls = [] for lbhc in lb.health_check: if lbhc.external_ids[ovn_const.LB_EXT_IDS_HM_VIP] == ( '172.26.21.20'): lb_hc_external_ids = copy.deepcopy(lbhc.external_ids) lb_hc_external_ids[ovn_const.LB_EXT_IDS_HM_VIP] = '10.0.0.123' kwargs = { 'vip': fip_info['vip_fip'] + ':80', 'options': lbhc.options, 'external_ids': lb_hc_external_ids} expected_db_create_calls.append(mock.call( 'Load_Balancer_Health_Check', **kwargs)) self.helper.ovn_nbdb_api.db_create.assert_has_calls( expected_db_create_calls) self.helper.ovn_nbdb_api.db_add.assert_called_once_with( 'Load_Balancer', lb.uuid, 'health_check', mock.ANY) expected_db_set_calls = [ mock.call('Load_Balancer', lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_ADDIT_VIP_FIP_KEY: '10.0.0.123'} )), mock.call('Load_Balancer', lb.uuid, ('vips', {'10.0.0.123:80': '192.168.2.149:1010', '172.26.21.20:80': 
'192.168.2.149:1010', '172.25.21.20:80': '192.168.2.149:1010'})) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_db_set_calls) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', lb.uuid, 'vips') @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') def test_handle_vip_fip_associate_no_lbhc(self, fb): lb = mock.MagicMock() fip_info = { 'action': 'associate', 'vip_fip': '10.0.0.123', 'ovn_lb': lb} members = 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id) external_ids = { 'listener_foo': '80:pool_%s' % self.pool_id, 'pool_%s' % self.pool_id: members, 'neutron:vip': '172.26.21.20'} lb.external_ids = external_ids lb.health_check = [] fb.return_value = lb self.helper.handle_vip_fip(fip_info) self.helper.ovn_nbdb_api.db_create.assert_not_called() self.helper.ovn_nbdb_api.db_add.assert_not_called() expected_db_set_calls = [ mock.call('Load_Balancer', lb.uuid, ('external_ids', {'neutron:vip_fip': '10.0.0.123'})), mock.call('Load_Balancer', lb.uuid, ('vips', {'10.0.0.123:80': '192.168.2.149:1010', '172.26.21.20:80': '192.168.2.149:1010'})) ] self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_db_set_calls) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', lb.uuid, 'vips') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_handle_member_dvr_lb_has_no_fip(self, net_cli): lb = mock.MagicMock() info = { 'id': self.member_id, 'pool_id': self.pool_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} external_ids = { 'neutron:vip_fip': ''} lb.external_ids = external_ids self.mock_find_lb_pool_key.return_value = lb self.helper.handle_member_dvr(info) net_cli.get_subnet.assert_not_called() self.helper.ovn_nbdb_api.db_clear.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_handle_member_dvr_lb_fip_no_ls_ports(self, net_cli): lb = mock.MagicMock() info = { 'id': self.member_id, 'subnet_id': self.member_subnet_id, 'pool_id': self.pool_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} external_ids = { 'neutron:vip_fip': '11.11.11.11'} lb.external_ids = external_ids self.mock_find_lb_pool_key.return_value = lb fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': {}, 'ports': {}}) self.helper.ovn_nbdb_api.lookup.return_value = fake_ls self.helper.handle_member_dvr(info) self.helper.ovn_nbdb_api.db_clear.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_handle_member_dvr_lb_fip_no_subnet(self, net_cli): lb = mock.MagicMock() info = { 'id': self.member_id, 'subnet_id': self.member_subnet_id, 'pool_id': self.pool_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} external_ids = { 'neutron:vip_fip': '11.11.11.11'} lb.external_ids = external_ids self.mock_find_lb_pool_key.return_value = lb net_cli.return_value.get_subnet.side_effect = [ openstack.exceptions.ResourceNotFound] self.helper.handle_member_dvr(info) self.helper.ovn_nbdb_api.db_clear.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_handle_member_dvr_lb_fip_no_ls(self, net_cli): lb = mock.MagicMock() info = { 'id': self.member_id, 'subnet_id': self.member_subnet_id, 'pool_id': self.pool_id, 'action': ovn_const.REQ_INFO_MEMBER_ADDED} external_ids = { 'neutron:vip_fip': '11.11.11.11'} lb.external_ids = external_ids self.mock_find_lb_pool_key.return_value = lb self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] 
self.helper.handle_member_dvr(info) self.helper.ovn_nbdb_api.db_clear.assert_not_called() def _test_handle_member_dvr_lb_fip( self, net_cli, action=ovn_const.REQ_INFO_MEMBER_ADDED): lb = mock.MagicMock() fake_port = fakes.FakePort.create_one_port( attrs={'allowed_address_pairs': ''}) info = { 'id': self.member_id, 'address': fake_port['fixed_ips'][0]['ip_address'], 'pool_id': self.pool_id, 'subnet_id': fake_port['fixed_ips'][0]['subnet_id'], 'action': action} member_subnet = fakes.FakeSubnet.create_one_subnet() member_subnet.update({'id': self.member_subnet_id}) member_subnet.update({'network_id': 'foo'}) net_cli.return_value.get_subnet.return_value = member_subnet fake_lsp = fakes.FakeOVNPort.from_neutron_port( fake_port) fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': {}, 'name': 'foo', 'ports': [fake_lsp]}) self.helper.ovn_nbdb_api.lookup.return_value = fake_ls fake_nat = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ip': '22.22.22.22', 'external_ids': { ovn_const.OVN_FIP_EXT_ID_KEY: 'fip_id'}}) fip_info = {'description': 'bar'} net_cli.return_value.get_ip.return_value = fip_info self.helper.ovn_nbdb_api.db_find_rows.return_value. \ execute.return_value = [fake_nat] external_ids = { ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '11.11.11.11'} lb.external_ids = external_ids self.mock_find_lb_pool_key.return_value = lb self.helper.handle_member_dvr(info) if action == ovn_const.REQ_INFO_MEMBER_ADDED: calls = [ mock.call.lookup('Logical_Switch', 'neutron-foo'), mock.call.db_find_rows('NAT', ('external_ids', '=', { ovn_const.OVN_FIP_PORT_EXT_ID_KEY: fake_lsp.name})), mock.ANY, mock.call.db_clear('NAT', fake_nat.uuid, 'external_mac'), mock.ANY, mock.call.db_clear('NAT', fake_nat.uuid, 'logical_port'), mock.ANY] self.helper.ovn_nbdb_api.assert_has_calls(calls) else: (net_cli.return_value.get_ip. assert_called_once_with('fip_id')) (net_cli.return_value.update_ip. 
assert_called_once_with('fip_id', description='bar')) self.helper.ovn_nbdb_api.db_clear.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_handle_member_dvr_lb_fip_member_added(self, net_cli): self._test_handle_member_dvr_lb_fip(net_cli) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_handle_member_dvr_lb_fip_member_deleted(self, net_cli): self._test_handle_member_dvr_lb_fip( net_cli, action=ovn_const.REQ_INFO_MEMBER_DELETED) def test_ovsdb_connections(self): ovn_helper.OvnProviderHelper.ovn_nbdb_api = None ovn_helper.OvnProviderHelper.ovn_nbdb_api_for_events = None prov_helper1 = ovn_helper.OvnProviderHelper() prov_helper2 = ovn_helper.OvnProviderHelper() # One connection for API requests self.assertIs(prov_helper1.ovn_nbdb_api.ovsdb_connection, prov_helper2.ovn_nbdb_api.ovsdb_connection) # One connection to handle events self.assertIs(prov_helper1.ovn_nbdb_api_for_events, prov_helper2.ovn_nbdb_api_for_events) prov_helper2.shutdown() prov_helper1.shutdown() def test_create_vip_port_vip_selected(self): expected_dict = { 'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, self.loadbalancer_id), 'fixed_ips': [{ 'subnet_id': self.vip_dict['vip_subnet_id'], 'ip_address': '10.1.10.1'}], 'network_id': self.vip_dict['vip_network_id'], 'admin_state_up': True, 'project_id': self.project_id} with mock.patch.object(clients, 'get_neutron_client') as net_cli: self.vip_dict['vip_address'] = '10.1.10.1' self.helper.create_vip_port(self.project_id, self.loadbalancer_id, self.vip_dict) expected_call = [ mock.call().create_port(**expected_dict)] net_cli.assert_has_calls(expected_call) def test_create_vip_port_vip_not_selected(self): expected_dict = { 'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, self.loadbalancer_id), 'fixed_ips': [{ 'subnet_id': self.vip_dict['vip_subnet_id']}], 'network_id': self.vip_dict['vip_network_id'], 'admin_state_up': True, 'project_id': self.project_id} with mock.patch.object(clients, 'get_neutron_client') as net_cli: self.helper.create_vip_port(self.project_id, self.loadbalancer_id, self.vip_dict) expected_call = [ mock.call().create_port(**expected_dict)] net_cli.assert_has_calls(expected_call) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_create_vip_port_vip_selected_already_exist(self, net_cli): net_cli.return_value.create_port.side_effect = [ openstack.exceptions.ConflictException] net_cli.return_value.find_port.return_value = ( Port(name='ovn-lb-vip-' + self.loadbalancer_id, id=self.loadbalancer_id)) self.vip_dict['vip_address'] = '10.1.10.1' ret, _ = self.helper.create_vip_port( self.project_id, self.loadbalancer_id, self.vip_dict) self.assertEqual( '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, self.loadbalancer_id), ret.name) self.assertEqual(self.loadbalancer_id, ret.id) expected_call = [ mock.call().find_port( network_id='%s' % self.vip_dict['vip_network_id'], name_or_id='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, self.loadbalancer_id))] net_cli.assert_has_calls(expected_call) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_create_vip_port_vip_selected_other_allocation_exist( self, net_cli): net_cli.return_value.create_port.side_effect = [ openstack.exceptions.ConflictException] net_cli.return_value.find_port.return_value = None self.vip_dict['vip_address'] = '10.1.10.1' self.assertRaises( openstack.exceptions.ConflictException, self.helper.create_vip_port, self.project_id, self.loadbalancer_id, self.vip_dict) expected_call = [ 
mock.call().find_port( network_id='%s' % self.vip_dict['vip_network_id'], name_or_id='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, self.loadbalancer_id))] net_cli.assert_has_calls(expected_call) self.helper._update_status_to_octavia.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_create_vip_port_vip_neutron_client_other_exception( self, del_port, net_cli): net_cli.return_value.create_port.side_effect = [ openstack.exceptions.HttpException] self.assertRaises( openstack.exceptions.HttpException, self.helper.create_vip_port, self.project_id, self.loadbalancer_id, self.vip_dict) del_port.assert_not_called() self.helper._update_status_to_octavia.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test_create_vip_port_exception_on_additional_vip_ports( self, del_port, net_cli): additional_vip_dicts = [{ 'ip_address': '192.168.100.109', 'network_id': self.vip_dict['vip_network_id'], 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid() }, { 'ip_address': '192.168.200.109', 'network_id': self.vip_dict['vip_network_id'], 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid() }] net_cli.return_value.create_port.side_effect = [ Port( network_id=self.vip_dict['vip_network_id'], name=ovn_const.LB_VIP_PORT_PREFIX + self.loadbalancer_id, id=self.pool_id), Port( network_id=self.vip_dict['vip_network_id'], name=ovn_const.LB_VIP_ADDIT_PORT_PREFIX + '1-' + self.loadbalancer_id, fixed_ips=[{ 'subnet_id': additional_vip_dicts[0]['subnet_id'], 'ip_address': additional_vip_dicts[0]['ip_address']}], id=additional_vip_dicts[0]['port_id']), openstack.exceptions.HttpException] net_cli.return_value.find_port.side_effect = [ Port( name=ovn_const.LB_VIP_PORT_PREFIX + self.loadbalancer_id, id=self.pool_id), Port( name=ovn_const.LB_VIP_ADDIT_PORT_PREFIX + '1-' + self.loadbalancer_id, id=additional_vip_dicts[0]['port_id']) ] self.assertRaises( openstack.exceptions.HttpException, self.helper.create_vip_port, self.project_id, self.loadbalancer_id, self.vip_dict, additional_vip_dicts) expected_calls = [ mock.call(self.pool_id), mock.call(additional_vip_dicts[0]['port_id'])] del_port.assert_has_calls(expected_calls) self.helper._update_status_to_octavia.assert_not_called() def test_get_pool_member_id(self): ret = self.helper.get_pool_member_id( self.pool_id, mem_addr_port='192.168.2.149:1010') self.assertEqual(self.member_id, ret) def test_get_pool_member_id_not_found(self): ret = self.helper.get_pool_member_id( self.pool_id, mem_addr_port='192.168.2.149:9999') self.assertIsNone(ret) def test__get_existing_pool_members(self): ret = self.helper._get_existing_pool_members(self.pool_id) self.assertEqual(ret, self.member_line) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' 
'_find_ovn_lb_by_pool_id') def test__get_existing_pool_members_exception(self, folbpi): folbpi.return_value = (None, None) self.assertRaises(exceptions.DriverError, self.helper._get_existing_pool_members, self.pool_id) def test__frame_lb_vips(self): ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {'10.22.33.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'} self.assertEqual(expected, ret) def test__frame_lb_vips_member_offline(self): self.ovn_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \ '{"%s": "%s"}' % (self.member_id, constants.OFFLINE) ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {} self.assertEqual(expected, ret) def test__frame_lb_vips_no_vip_fip(self): self.ovn_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {'10.22.33.4:80': '192.168.2.149:1010'} self.assertEqual(expected, ret) def test__frame_lb_vips_additional_vips_only_member_ipv4(self): self.ovn_lb.external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = \ '10.24.34.4,2001:db8::1' ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {'10.22.33.4:80': '192.168.2.149:1010', '10.24.34.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010'} self.assertEqual(expected, ret) def test__frame_lb_vips_additional_vips_mixing_member_ipv4_ipv6(self): self.ovn_lb.external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = \ '10.24.34.4,2001:db8::1' self.member_address = '2001:db8::3' self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids['pool_%s' % self.pool_id] = ','.join([ self.ovn_lb.external_ids['pool_%s' % self.pool_id], self.member_line]) ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {'10.22.33.4:80': '192.168.2.149:1010', '10.24.34.4:80': '192.168.2.149:1010', '123.123.123.123:80': '192.168.2.149:1010', '[2001:db8::1]:80': '[2001:db8::3]:1010'} self.assertEqual(expected, ret) def test__frame_lb_vips_additional_vips_only_member_ipv6(self): self.ovn_lb.external_ids[ovn_const.LB_EXT_IDS_ADDIT_VIP_KEY] = \ '10.24.34.4,2001:db8::1' self.member_address = '2001:db8::3' self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids['pool_%s' % self.pool_id] = self.member_line ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {'[2001:db8::1]:80': '[2001:db8::3]:1010'} self.assertEqual(expected, ret) def test__frame_lb_vips_disabled(self): self.ovn_lb.external_ids['enabled'] = 'False' ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) self.assertEqual({}, ret) def test__frame_lb_vips_ipv6(self): self.member_address = '2001:db8::1' self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_port, self.member_subnet_id)) self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: 'fc00::', 'pool_%s' % self.pool_id: self.member_line, 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} ret = self.helper._frame_vip_ips(self.ovn_lb, self.ovn_lb.external_ids) expected = {'[fc00::]:80': '[2001:db8::1]:1010'} self.assertEqual(expected, ret) def test_check_lb_protocol(self): self.ovn_lb.protocol = ['tcp'] ret = self.helper.check_lb_protocol(self.listener_id, 'udp') self.assertFalse(ret) ret = 
self.helper.check_lb_protocol(self.listener_id, 'UDP') self.assertFalse(ret) ret = self.helper.check_lb_protocol(self.listener_id, 'sctp') self.assertFalse(ret) ret = self.helper.check_lb_protocol(self.listener_id, 'SCTP') self.assertFalse(ret) ret = self.helper.check_lb_protocol(self.listener_id, 'tcp') self.assertTrue(ret) ret = self.helper.check_lb_protocol(self.listener_id, 'TCP') self.assertTrue(ret) def test_check_lb_protocol_no_listener(self): self.ovn_lb.external_ids = [] ret = self.helper.check_lb_protocol(self.listener_id, 'TCP') self.assertTrue(ret) @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.' '_find_ovn_lbs') def test_check_lb_protocol_no_lb(self, fol): fol.return_value = None ret = self.helper.check_lb_protocol(self.listener_id, 'TCP') self.assertFalse(ret) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__get_port_from_info_with_port_id(self, net_cli): port_id = self.vip_port_id subnet_id = self.vip_subnet_id network_id = None address = self.vip_address fake_port = fakes.FakePort.create_one_port() fake_port['fixed_ips'].append({ 'ip_address': address, 'subnet_id': subnet_id}) subnet_data = {'id': subnet_id} net_cli.get_port.return_value = fake_port net_cli.get_subnet.return_value = {'id': subnet_id} result_port, result_subnet = self.helper._get_port_from_info( net_cli, port_id, network_id, address) self.assertEqual(result_port, fake_port) self.assertEqual(result_subnet, subnet_data) net_cli.get_port.assert_called_once_with(port_id) net_cli.get_subnet.assert_called_once_with(subnet_id) result_port, result_subnet = self.helper._get_port_from_info( net_cli, port_id, network_id, address, subnet_required=False) self.assertEqual(result_port, fake_port) self.assertIsNone(result_subnet) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_neutron_list_ports') def test__get_port_from_info_with_network_id_and_address(self, list_ports, net_cli): port_id = None network_id = self.vip_network_id address = self.vip_address fake_port = fakes.FakePort.create_one_port() fake_port['fixed_ips'].append({ 'ip_address': self.vip_address, 'subnet_id': self.vip_subnet_id}) fake_port2 = fakes.FakePort.create_one_port() fake_port2['fixed_ips'].append({ 'ip_address': '192.148.210.119', 'subnet_id': uuidutils.generate_uuid()}) subnet_data = {'id': self.vip_subnet_id} ports_data = [fake_port, fake_port2] net_cli.get_subnet.return_value = subnet_data list_ports.return_value = ports_data result_port, result_subnet = self.helper._get_port_from_info( net_cli, port_id, network_id, address) self.assertEqual(result_port, fake_port) self.assertEqual(result_subnet, subnet_data) list_ports.assert_called_once_with(net_cli, network_id=network_id) net_cli.get_subnet.assert_called_once_with(self.vip_subnet_id) result_port, result_subnet = self.helper._get_port_from_info( net_cli, port_id, network_id, address, subnet_required=False) self.assertEqual(result_port, fake_port) self.assertIsNone(result_subnet) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__get_port_from_info_port_not_without_match_address(self, net_cli): port_id = self.vip_port_id network_id = None address = self.vip_address fake_port = fakes.FakePort.create_one_port() net_cli.get_port.return_value = fake_port result_port, result_subnet = self.helper._get_port_from_info( net_cli, port_id, network_id, address) self.assertEqual(result_port, fake_port) self.assertIsNone(result_subnet) 
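        # No fixed IP on the returned port matches the requested address,
        # so the helper returns the port alone and must not look up a subnet.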
net_cli.get_port.assert_called_once_with(port_id) net_cli.get_subnet.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__get_port_from_info_port_insufficient_data(self, net_cli): result_port, result_subnet = self.helper._get_port_from_info( net_cli, None, None, None) self.assertIsNone(result_port) self.assertIsNone(result_subnet) net_cli.get_port.assert_not_called() net_cli.get_subnet.assert_not_called() def test__get_vip_port_from_loadbalancer_id(self): fake_lb = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='additional_vip_lb', project_id=self.project_id, vip_address=self.vip_address, vip_port_id=self.vip_port_id, vip_network_id=self.vip_network_id, ext_ids={}) self.octavia_driver_lib.get_loadbalancer.return_value = fake_lb lb_vip_port_id = self.helper._get_vip_port_from_loadbalancer_id( self.loadbalancer_id) self.assertEqual(lb_vip_port_id, self.vip_port_id) def test__get_additional_vips_from_loadbalancer_id(self): additional_vips_data = [ { 'port_id': uuidutils.generate_uuid(), 'ip_address': '10.0.0.50', 'network_id': self.vip_network_id, 'subnet_id': uuidutils.generate_uuid(), }, { 'port_id': uuidutils.generate_uuid(), 'ip_address': '10.0.1.50', 'network_id': self.vip_network_id, 'subnet_id': uuidutils.generate_uuid(), }] fake_lb = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='additional_vip_lb', project_id=self.project_id, vip_address=self.vip_address, vip_port_id=self.vip_port_id, vip_network_id=self.vip_network_id, additional_vips=additional_vips_data, ext_ids={}) self.octavia_driver_lib.get_loadbalancer.return_value = fake_lb lb_additional_vip = \ self.helper._get_additional_vips_from_loadbalancer_id( self.loadbalancer_id) self.assertEqual(lb_additional_vip, additional_vips_data) fake_lb = fakes.FakeLB( uuid=uuidutils.generate_uuid(), admin_state_up=True, listeners=[], loadbalancer_id=self.loadbalancer_id, name='additional_vip_lb', project_id=self.project_id, vip_address=self.vip_address, vip_port_id=self.vip_port_id, vip_network_id=self.vip_network_id, ext_ids={}) self.octavia_driver_lib.get_loadbalancer.return_value = fake_lb lb_additional_vip = \ self.helper._get_additional_vips_from_loadbalancer_id( self.loadbalancer_id) self.assertEqual([], lb_additional_vip) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def _test_hm_create(self, protocol, members, fip, folbpi, uhm, net_cli): self._get_pool_listeners.stop() fake_subnet = fakes.FakeSubnet.create_one_subnet() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.protocol = [protocol] folbpi.return_value = (pool_key, self.ovn_hm_lb) uhm.return_value = constants.ONLINE net_cli.return_value.get_subnet.return_value = {'subnet': fake_subnet} if not fip: del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id, 'online') status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) 
self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) if members: self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] + ':' + str(self.listener['protocol_port'])) if fip: fip = (self.ovn_hm_lb.external_ids[ ovn_const.LB_EXT_IDS_VIP_FIP_KEY] + ':' + str(self.listener['protocol_port'])) options = {'interval': '6', 'timeout': '7', 'failure_count': '5', 'success_count': '3'} external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id, ovn_const.LB_EXT_IDS_HM_VIP: self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY], ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id} kwargs = {'vip': vip, 'options': options, 'external_ids': external_ids} if fip: external_ids_fips = copy.deepcopy(external_ids) external_ids_fips[ovn_const.LB_EXT_IDS_HM_VIP] = ( self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY]) fip_kwargs = {'vip': fip, 'options': options, 'external_ids': external_ids_fips} expected_lbhc_calls = [ mock.call('Load_Balancer_Health_Check', **kwargs)] if fip: expected_lbhc_calls.append( mock.call('Load_Balancer_Health_Check', **fip_kwargs) ) self.helper.ovn_nbdb_api.db_create.assert_has_calls( expected_lbhc_calls) if fip: self.assertEqual(self.helper.ovn_nbdb_api.db_add.call_count, 2) else: self.helper.ovn_nbdb_api.db_add.assert_called_once_with( 'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY) def test_hm_create_tcp(self): self._test_hm_create('tcp', False, True) def test_hm_create_tcp_no_fip(self): self._test_hm_create('tcp', False, False) def test_hm_create_udp(self): self._test_hm_create('udp', False, True) def test_hm_create_tcp_pool_members(self): pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = self.member_line self._test_hm_create('tcp', True, True) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_no_vip_port(self, folbpi): pool_key = 'pool_%s' % self.pool_id listener_key = 'listener_%s' % self.listener_id self.ovn_hm_lb.external_ids.pop(listener_key) folbpi.return_value = (pool_key, self.ovn_hm_lb) status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ONLINE) vip = '' options = {'interval': '6', 'timeout': '7', 'success_count': '3', 'failure_count': '5'} self.ovn_hm.external_ids.pop(ovn_const.LB_EXT_IDS_HM_KEY) external_ids_vip = { ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id, ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id, ovn_const.LB_EXT_IDS_HM_VIP: self.ovn_hm_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_KEY)} external_ids_fip = { ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id, ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id, ovn_const.LB_EXT_IDS_HM_VIP: self.ovn_hm_lb.external_ids.get( ovn_const.LB_EXT_IDS_VIP_FIP_KEY)} kwargs_first = {'vip': vip, 'options': options, 'external_ids': external_ids_vip} kwargs_second = {'vip': vip, 'options': options, 'external_ids': external_ids_fip} expected_lbhc_calls = [ mock.call('Load_Balancer_Health_Check', **kwargs_first), mock.call('Load_Balancer_Health_Check', **kwargs_second)] self.helper.ovn_nbdb_api.db_create.assert_has_calls( expected_lbhc_calls) 
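        # Both LBHC rows (the VIP-keyed and the FIP-keyed one) are expected
        # to be attached to the Load_Balancer, hence two db_add calls.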
self.assertEqual(self.helper.ovn_nbdb_api.db_add.call_count, 2) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_offline(self, folbpi): self._get_pool_listeners.stop() pool_key = 'pool_%s' % self.pool_id folbpi.return_value = (pool_key, self.ovn_hm_lb) self.health_monitor['admin_state_up'] = False self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id, 'online') status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.OFFLINE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_lb_not_found(self, folbpi): folbpi.return_value = (None, None) status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_pool_not_found(self, folbpi): folbpi.return_value = ('pool_closed', self.ovn_hm_lb) status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) self.assertEqual(status['pools'][0]['operating_status'], constants.OFFLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_vip_not_found(self, folbpi): pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_KEY) folbpi.return_value = (pool_key, self.ovn_hm_lb) status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ERROR) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_lsp_not_found(self, folbpi, net_cli): pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = self.member_line folbpi.return_value = (pool_key, self.ovn_hm_lb) net_cli.return_value.get_subnet.side_effect = [ openstack.exceptions.ResourceNotFound] status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ONLINE) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_hm_port_not_found(self, folbpi, net_cli): fake_subnet = fakes.FakeSubnet.create_one_subnet() fake_port = fakes.FakePort.create_one_port( attrs={'allowed_address_pairs': ''}) member = {'id': uuidutils.generate_uuid(), 'address': fake_port['fixed_ips'][0]['ip_address'], 'protocol_port': '9999', 'subnet_id': fake_subnet['id'], 'pool_id': self.pool_id, 'admin_state_up': True, 'old_admin_state_up': True} 
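        # Seed the pool with a single member entry using the external_ids
        # convention 'member_<member_id>_<ip>:<port>_<subnet_id>'.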
member_line = ('member_%s_%s:%s_%s' % (member['id'], member['address'], member['protocol_port'], member['subnet_id'])) pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = member_line folbpi.return_value = (pool_key, self.ovn_hm_lb) net_cli.return_value.get_subnet.return_value = fake_subnet net_cli.return_value.ports.return_value = iter(()) fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port) fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': {}, 'ports': [fake_lsp]}) self.helper.ovn_nbdb_api.lookup.return_value = fake_ls status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ERROR) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_hm_source_ip_not_found(self, folbpi, net_cli): fake_subnet = fakes.FakeSubnet.create_one_subnet() fake_port = fakes.FakePort.create_one_port( attrs={'allowed_address_pairs': ''}) member = {'id': uuidutils.generate_uuid(), 'address': fake_port['fixed_ips'][0]['ip_address'], 'protocol_port': '9999', 'subnet_id': fake_subnet['id'], 'pool_id': self.pool_id, 'admin_state_up': True, 'old_admin_state_up': True} member_line = ( 'member_%s_%s:%s_%s' % (member['id'], member['address'], member['protocol_port'], member['subnet_id'])) pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = member_line folbpi.return_value = (pool_key, self.ovn_hm_lb) net_cli.return_value.get_subnet.return_value = fake_subnet fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port) fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'external_ids': {}, 'ports': [fake_lsp]}) self.helper.ovn_nbdb_api.lookup.return_value = fake_ls status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_create_db_exception(self, folbpi): pool_key = 'pool_%s' % self.pool_id folbpi.return_value = (pool_key, self.ovn_hm_lb) self.helper.ovn_nbdb_api.db_create.side_effect = [RuntimeError] status = self.helper.hm_create(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb') def test_hm_existing_lbhc_update_on_listener_create(self, get_ovn_lb): vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] + ':' + str(self.listener['protocol_port'])) fip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] + ':' + str(self.listener['protocol_port'])) self.ovn_hm.vip = [] self.ovn_hm_fip = copy.deepcopy(self.ovn_hm) self.ovn_hm_fip.external_ids[ovn_const.LB_EXT_IDS_HM_VIP] = ( self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY]) self.ovn_hm_lb.health_check = [self.ovn_hm, self.ovn_hm_fip] get_ovn_lb.return_value = self.ovn_hm_lb self.listener['admin_state_up'] = True status = self.helper.listener_create(self.listener) expected_set_external_ids_calls = [ mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', vip)), mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', fip))] 
self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_set_external_ids_calls) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb') def test_hm_create_then_listener_create_no_fip(self, get_ovn_lb): vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] + ':' + str(self.listener['protocol_port'])) self.ovn_hm.vip = [] self.ovn_hm_lb.health_check = [self.ovn_hm] get_ovn_lb.return_value = self.ovn_hm_lb self.listener['admin_state_up'] = True del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] status = self.helper.listener_create(self.listener) self.helper.ovn_nbdb_api.db_set.assert_called_with( 'Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', vip)) self.helper.ovn_nbdb_api.db_create.assert_not_called() self.helper.ovn_nbdb_api.db_add.assert_not_called() self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips') @mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_lbhcs_by_hm_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb') def test_hm_create_then_listener_create_no_vip(self, get_ovn_lb, lookup_hm, refresh_vips): get_ovn_lb.return_value = self.ovn_hm_lb lookup_hm.return_value = [self.ovn_hm] self.ovn_hm_lb.health_check = [self.ovn_hm] self.ovn_hm_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_KEY) self.listener['admin_state_up'] = True status = self.helper.listener_create(self.listener) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_update(self, folbfhi): folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) status = self.helper.hm_update(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_update_no_admin_state_up(self, folbfhi): folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) self.ovn_hm_lb.pop('admin_state_up') status = self.helper.hm_update(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_update_offline(self, folbfhi): folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) self.health_monitor['admin_state_up'] = False status = self.helper.hm_update(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.OFFLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_update_hm_not_found(self, folbfhi): folbfhi.return_value = ([], None) status = self.helper.hm_update(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ERROR) 
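    # NOTE: illustrative sketch only; this hypothetical helper is not part of
    # the provider API and is not used by the tests. The hm_create/hm_update
    # tests assert that the Octavia health monitor numeric fields are written
    # into the OVN Load_Balancer_Health_Check 'options' column as strings.
    # Assuming an Octavia-style dict like self.health_monitor, the expected
    # mapping looks roughly like this:
    @staticmethod
    def _example_lbhc_options(health_monitor):
        # Keys mirror the 'options' dicts asserted against db_set/db_create
        # in the surrounding health monitor tests.
        return {
            'interval': str(health_monitor['interval']),
            'timeout': str(health_monitor['timeout']),
            'success_count': str(health_monitor['success_count']),
            'failure_count': str(health_monitor['failure_count']),
        }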
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_update_lb_not_found(self, folbpi, folbfhi): folbfhi.return_value = ([self.ovn_hm], None) folbpi.return_value = (None, None) status = self.helper.hm_update(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.ERROR) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.ERROR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_update_just_interval(self, folbfhi): folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) self.health_monitor['interval'] = 3 self.helper.hm_update(self.health_monitor) options = { 'interval': str(self.health_monitor['interval']), 'timeout': str(self.health_monitor['timeout']), 'success_count': str(self.health_monitor['success_count']), 'failure_count': str(self.health_monitor['failure_count'])} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer_Health_Check', self.ovn_hm.uuid, ('options', options)) @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port') def test_hm_delete(self, del_hm_port): self._get_pool_listeners.stop() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = self.member_line self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.side_effect = [[self.ovn_hm_lb], [self.ovn_hm]] status = self.helper.hm_delete(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) expected_remove_calls = [ mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', self.ovn_hm.uuid), mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'external_ids', ovn_const.LB_EXT_IDS_HMS_KEY)] expected_destroy_calls = [ mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)] del_hm_port.assert_called_once_with(self.member_subnet_id) self.helper.ovn_nbdb_api.db_remove.assert_has_calls( expected_remove_calls) self.helper.ovn_nbdb_api.db_destroy.assert_has_calls( expected_destroy_calls) @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port') def test_hm_delete_multiples_pools_sharing_members(self, del_hm_port): self._get_pool_listeners.stop() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = self.member_line self.ovn_hm_lb.external_ids['pool_fake'] = self.member_line self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.side_effect = [[self.ovn_hm_lb], [self.ovn_hm]] status = self.helper.hm_delete(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['id'], self.pool_id) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) expected_remove_calls = [ mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', self.ovn_hm.uuid), mock.call('Load_Balancer', 
self.ovn_hm_lb.uuid, 'external_ids', ovn_const.LB_EXT_IDS_HMS_KEY)] expected_destroy_calls = [ mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)] del_hm_port.assert_called_once_with(self.member_subnet_id) self.helper.ovn_nbdb_api.db_remove.assert_has_calls( expected_remove_calls) self.helper.ovn_nbdb_api.db_destroy.assert_has_calls( expected_destroy_calls) @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port') def test_hm_delete_without_members_in_pool(self, del_hm_port): self._get_pool_listeners.stop() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = '' self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.side_effect = [[self.ovn_hm_lb], [self.ovn_hm]] status = self.helper.hm_delete(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['listeners'][0]['provisioning_status'], constants.ACTIVE) expected_remove_calls = [ mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', self.ovn_hm.uuid), mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'external_ids', ovn_const.LB_EXT_IDS_HMS_KEY)] expected_destroy_calls = [ mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)] del_hm_port.assert_not_called() self.helper.ovn_nbdb_api.db_remove.assert_has_calls( expected_remove_calls) self.helper.ovn_nbdb_api.db_destroy.assert_has_calls( expected_destroy_calls) def test_hm_delete_row_not_found(self): self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.return_value = [self.ovn_hm] self.helper.ovn_nbdb_api.db_find_rows.side_effect = ( [idlutils.RowNotFound]) status = self.helper.hm_delete(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) self.helper.ovn_nbdb_api.db_clear.assert_not_called() def test_hm_delete_hm_not_found(self): self.helper.ovn_nbdb_api.db_list_rows.return_value.\ execute.return_value = [self.ovn_hm] self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_hm_lb] self.health_monitor['id'] = 'id_not_found' status = self.helper.hm_delete(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) self.helper.ovn_nbdb_api.db_clear.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_delete_hm_not_found_in_external_ids(self, folbfhi): folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb) self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_HMS_KEY] = [] status = self.helper.hm_delete(self.health_monitor) self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') def test_hm_delete_hm_not_match_in_external_ids(self, folbfhi): folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb) self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_HMS_KEY] = \ '["%s"]' % (uuidutils.generate_uuid()) status = self.helper.hm_delete(self.health_monitor) 
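        # The LB's HMS external_ids entry references a different HM id, so
        # the delete is still expected to report DELETED/NO_MONITOR.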
self.assertEqual(status['healthmonitors'][0]['provisioning_status'], constants.DELETED) self.assertEqual(status['healthmonitors'][0]['operating_status'], constants.NO_MONITOR) expected_set_external_ids_calls = [ mock.call('Load_Balancer', self.ovn_hm_lb.uuid, ('external_ids', { ovn_const.LB_EXT_IDS_HMS_KEY: self.ovn_hm_lb.external_ids[ ovn_const.LB_EXT_IDS_HMS_KEY]}))] self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_set_external_ids_calls) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def _test_hm_sync(self, protocol, fip, folbpi, uhm, net_cli): self._get_pool_listeners.stop() fake_subnet = fakes.FakeSubnet.create_one_subnet() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.protocol = [protocol] folbpi.return_value = (pool_key, self.ovn_hm_lb) uhm.return_value = constants.ONLINE net_cli.return_value.get_subnet.return_value = {'subnet': fake_subnet} if not fip: del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id, 'online') self.helper.hm_sync(self.health_monitor, self.ovn_lb, pool_key) vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] + ':' + str(self.listener['protocol_port'])) if fip: fip = (self.ovn_hm_lb.external_ids[ ovn_const.LB_EXT_IDS_VIP_FIP_KEY] + ':' + str(self.listener['protocol_port'])) options = {'interval': '6', 'timeout': '7', 'failure_count': '5', 'success_count': '3'} external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id, ovn_const.LB_EXT_IDS_HM_VIP: self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY], ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id} kwargs = {'vip': vip, 'options': options, 'external_ids': external_ids} if fip: external_ids_fips = copy.deepcopy(external_ids) external_ids_fips[ovn_const.LB_EXT_IDS_HM_VIP] = ( self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY]) fip_kwargs = {'vip': fip, 'options': options, 'external_ids': external_ids_fips} expected_lbhc_calls = [ mock.call('Load_Balancer_Health_Check', **kwargs)] if fip: expected_lbhc_calls.append( mock.call('Load_Balancer_Health_Check', **fip_kwargs) ) self.helper.ovn_nbdb_api.db_create.assert_has_calls( expected_lbhc_calls) if fip: self.assertEqual(self.helper.ovn_nbdb_api.db_add.call_count, 2) else: self.helper.ovn_nbdb_api.db_add.assert_called_once_with( 'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY) def test_hm_sync_recreate(self): self._test_hm_sync('tcp', True) def test_hm_sync_recreate_no_fip(self): self._test_hm_sync('tcp', False) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_sync_on_exist(self, folbpi, uhm, net_cli, folbfhi): protocol, fip = ('tcp', True) folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) self._get_pool_listeners.stop() fake_subnet = fakes.FakeSubnet.create_one_subnet() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.protocol = [protocol] folbpi.return_value = (pool_key, self.ovn_hm_lb) uhm.return_value = constants.ONLINE net_cli.return_value.get_subnet.return_value = {'subnet': fake_subnet} if not fip: del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] 
self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id, 'online') self.helper.hm_sync(self.health_monitor, self.ovn_lb, pool_key) self.helper.ovn_nbdb_api.db_create.assert_not_called() self.assertEqual(self.helper.ovn_nbdb_api.db_add.call_count, 0) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def _test_hm_sync_on_exist_diff(self, protocol, fip, diff, folbpi, uhm, net_cli, folbfhi): vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] + ':' + str(self.listener['protocol_port'])) if fip: fip = (self.ovn_hm_lb.external_ids[ ovn_const.LB_EXT_IDS_VIP_FIP_KEY] + ':' + str(self.listener['protocol_port'])) options = {'interval': '6', 'timeout': '7', 'failure_count': '5', 'success_count': '3'} external_ids = { ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id, ovn_const.LB_EXT_IDS_HM_VIP: self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY], ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id } ovn_external_ids = { ovn_const.LB_EXT_IDS_HMS_KEY: f'["{self.healthmonitor_id}"]' } hm_opt = copy.deepcopy(options) self.ovn_hm.options = hm_opt self.ovn_hm.vip = vip self.ovn_hm.external_ids = external_ids self.ovn_hm_lb.health_check = [self.ovn_hm] if diff: self.ovn_hm.vip = 'foo' self.ovn_hm.options['interval'] = '4' self.ovn_hm.external_ids = {} self.ovn_hm_lb.health_check = '' self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_HMS_KEY] = [] folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) self._get_pool_listeners.stop() fake_subnet = fakes.FakeSubnet.create_one_subnet() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.protocol = [protocol] folbpi.return_value = (pool_key, self.ovn_hm_lb) uhm.return_value = constants.ONLINE net_cli.return_value.get_subnet.return_value = {'subnet': fake_subnet} if not fip: del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id, 'online') self.helper.hm_sync(self.health_monitor, self.ovn_lb, pool_key) self.helper.ovn_nbdb_api.db_create.assert_not_called() if diff: self.helper.ovn_nbdb_api.db_add.assert_called_once_with( 'Load_Balancer', self.ovn_hm_lb.uuid, ('health_check', self.ovn_hm.uuid) ) else: self.helper.ovn_nbdb_api.db_create.assert_not_called() if diff: expected_lbhc_calls = [ mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', vip)), mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid, ('options', options)), mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid, ('external_ids', external_ids)), mock.call('Load_Balancer', self.ovn_hm_lb.uuid, ('external_ids', ovn_external_ids)) ] else: expected_lbhc_calls = [] if fip: expected_lbhc_calls.extend([ mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', fip))]) self.helper.ovn_nbdb_api.db_set.assert_has_calls( expected_lbhc_calls) def test_hm_sync_on_exist_diff(self): protocol, fip = ('tcp', True) self._test_hm_sync_on_exist_diff(protocol, fip, True) def test_hm_sync_on_exist_diff_nofip(self): protocol, fip = ('tcp', False) self._test_hm_sync_on_exist_diff(protocol, fip, True) def test_hm_sync_on_exist_no_diff(self): protocol, fip = ('tcp', True) self._test_hm_sync_on_exist_diff(protocol, fip, False) def test_hm_sync_on_exist_no_diff_nofip(self): protocol, fip = ('tcp', False) self._test_hm_sync_on_exist_diff(protocol, fip, 
False) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id') def test_hm_sync_on_exist_no_fip(self, folbpi, uhm, net_cli, folbfhi): protocol, fip = ('tcp', False) folbfhi.return_value = ([self.ovn_hm], self.ovn_hm_lb) self._get_pool_listeners.stop() fake_subnet = fakes.FakeSubnet.create_one_subnet() pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.protocol = [protocol] folbpi.return_value = (pool_key, self.ovn_hm_lb) uhm.return_value = constants.ONLINE net_cli.return_value.get_subnet.return_value = {'subnet': fake_subnet} if not fip: del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id, 'online') self.helper.hm_sync(self.health_monitor, self.ovn_lb, pool_key) self.helper.ovn_nbdb_api.db_create.assert_not_called() self.assertEqual(self.helper.ovn_nbdb_api.db_add.call_count, 0) @mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_lbhcs_by_hm_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry') def test_hm_purge(self, m_fol, lookup_hm): self.ovn_hm_lb.health_check = [self.ovn_hm] m_fol.return_value = [self.ovn_hm_lb] lookup_hm.return_value = [self.ovn_hm] self.helper.hm_purge(self.ovn_hm_lb.id) self.helper.ovn_nbdb_api.db_remove.assert_not_called() self.helper.ovn_nbdb_api.db_destroy.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_lbhcs_by_hm_id') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry') def test_hm_purge_remove(self, m_fol, lookup_hm): self.ovn_hm_lb.health_check = [self.ovn_hm] m_fol.return_value = [self.ovn_hm_lb] lookup_hm.return_value = [] self.helper.hm_purge(self.ovn_hm_lb.id) self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( 'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', self.ovn_hm.uuid) self.helper.ovn_nbdb_api.db_destroy.assert_called_once_with( 'Load_Balancer_Health_Check', self.ovn_hm.uuid) @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry') def test_hm_purge_exception_ovn_lb_not_found(self, m_fol): self.ovn_hm_lb.health_check = [self.ovn_hm] m_fol.side_effect = [idlutils.RowNotFound] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.helper.hm_purge(self.ovn_hm_lb.id) m_l.debug.assert_called_once_with( f"OVN loadbalancer {self.ovn_hm_lb.id} not found.") @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands') @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lbs_with_retry') def test_hm_purge_exception_execute_commands(self, m_fol, m_exe): self.ovn_hm_lb.health_check = [self.ovn_hm] m_exe.side_effect = [idlutils.RowNotFound] m_fol.return_value = [self.ovn_hm_lb] with mock.patch.object(ovn_helper, 'LOG') as m_l: self.helper.hm_purge(self.ovn_hm_lb.id) m_l.debug.assert_called_once_with( "health check not found for purge.") def test_hm_update_event_offline(self): self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_hm_lb] self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent( self.helper) src_ip = '10.22.33.4' row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip': self.member_address, 'logical_port': 'a-logical-port', 'src_ip': src_ip, 'port': self.member_port, 'protocol': self.ovn_hm_lb.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}) 
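        # Dispatching the update should enqueue a 'hm_update_event' request
        # for the load balancer matching this Service_Monitor row.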
self.hm_update_event.run('update', row, mock.ANY) expected = { 'info': {'ovn_lbs': [self.ovn_hm_lb], 'ip': self.member_address, 'port': self.member_port, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}, 'type': 'hm_update_event'} self.mock_add_request.assert_called_once_with(expected) self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with( 'Load_Balancer', ('ip_port_mappings', '=', {self.member_address: 'a-logical-port:' + src_ip}), ('protocol', '=', self.ovn_hm_lb.protocol[0])) def test_hm_update_event_offline_by_delete(self): self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [self.ovn_hm_lb] self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent( self.helper) src_ip = '10.22.33.4' row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip': self.member_address, 'logical_port': 'a-logical-port', 'src_ip': src_ip, 'port': self.member_port, 'protocol': self.ovn_hm_lb.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_ONLINE}) self.hm_update_event.run('delete', row, mock.ANY) expected = { 'info': {'ovn_lbs': [self.ovn_hm_lb], 'ip': self.member_address, 'port': self.member_port, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}, 'type': 'hm_update_event'} self.mock_add_request.assert_called_once_with(expected) self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with( 'Load_Balancer', ('ip_port_mappings', '=', {self.member_address: 'a-logical-port:' + src_ip}), ('protocol', '=', self.ovn_hm_lb.protocol[0])) def test_hm_update_event_lb_not_found(self): self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip': self.member_address, 'logical_port': 'a-logical-port', 'src_ip': '10.22.33.4', 'port': self.member_port, 'protocol': self.ovn_hm_lb.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}) self.hm_update_event.run('update', row, mock.ANY) self.mock_add_request.assert_not_called() def test_hm_update_event_lb_row_not_found(self): self.helper.ovn_nbdb_api.db_find_rows.\ side_effect = [idlutils.RowNotFound] self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent( self.helper) row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip': self.member_address, 'logical_port': 'a-logical-port', 'src_ip': '10.22.33.4', 'port': self.member_port, 'protocol': self.ovn_hm_lb.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}) self.hm_update_event.run('update', row, mock.ANY) self.mock_add_request.assert_not_called() def _test_hm_update_no_member(self, bad_ip, bad_port): fake_subnet = fakes.FakeSubnet.create_one_subnet() fake_port = fakes.FakePort.create_one_port( attrs={'allowed_address_pairs': ''}) ip = fake_port['fixed_ips'][0]['ip_address'] member = {'id': uuidutils.generate_uuid(), 'address': ip, 'protocol_port': self.member_port, 'subnet_id': fake_subnet['id'], 'pool_id': self.pool_id, 'admin_state_up': True, 'old_admin_state_up': True} member_line = ( 'member_%s_%s:%s_%s' % (member['id'], member['address'], member['protocol_port'], member['subnet_id'])) pool_key = 'pool_%s' % self.pool_id self.ovn_hm_lb.external_ids[pool_key] = member_line if bad_ip: ip = 'bad-ip' port = self.member_port if bad_port: port = 'bad-port' info = { 'ovn_lbs': [self.ovn_hm_lb], 'ip': ip, 'logical_port': 'a-logical-port', 'src_ip': '10.22.33.4', 'port': port, 'protocol': self.ovn_hm_lb.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE} status = self.helper.hm_update_event(info) self.assertIsNone(status) 
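    # NOTE: illustrative sketch only, not exercised by these tests. Throughout
    # this class, pool members are tracked in the OVN LB external_ids as
    # comma-separated entries of the form
    # 'member_<member_id>_<ip>:<port>_<subnet_id>' under 'pool_<pool_id>',
    # and per-member operating status is stored as JSON under
    # ovn_const.OVN_MEMBER_STATUS_KEY (see _add_member further below). A
    # hypothetical formatter matching that convention:
    @staticmethod
    def _example_member_line(member):
        # Same format string the tests use when seeding external_ids.
        return 'member_%s_%s:%s_%s' % (
            member['id'], member['address'],
            member['protocol_port'], member['subnet_id'])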
def test_hm_update_event_member_ip_not_found(self): self._test_hm_update_no_member(True, False) def test_hm_update_event_member_port_not_found(self): self._test_hm_update_no_member(False, True) def _test_hm_update_status(self, ovn_lbs, member_id, ip, port, mb_status): info = { 'ovn_lbs': ovn_lbs, 'ip': ip, 'logical_port': 'a-logical-port', 'src_ip': '10.22.33.4', 'port': port, 'protocol': ovn_lbs[0].protocol, 'status': [mb_status]} mb_status_ovn = 'error' if mb_status == 'offline' else mb_status self._update_external_ids_member_status(self.ovn_hm_lb, member_id, mb_status_ovn) status = self.helper.hm_update_event(info) return status @mock.patch.object(ovn_helper.OvnProviderHelper, '_create_hm_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__ensure_hm_ovn_port(self, mock_get_neutron_client, mock_create_hm_port): mock_neutron_client = mock_get_neutron_client.return_value mock_find_port = mock_neutron_client.find_port mock_find_port.return_value = Port(id='fake_id') self.helper._ensure_hm_ovn_port('network_id', 'subnet_id', 'project_id') mock_find_port.assert_called_once_with( network_id='network_id', name_or_id='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, 'subnet_id')) mock_create_hm_port.assert_not_called() @mock.patch.object(ovn_helper.OvnProviderHelper, '_create_hm_port') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__ensure_hm_ovn_port_create_port_on_network( self, mock_get_neutron_client, mock_create_hm_port): mock_neutron_client = mock_get_neutron_client.return_value mock_find_port = mock_neutron_client.find_port mock_find_port.return_value = None self.helper._ensure_hm_ovn_port('network_id', 'subnet_id', 'project_id') mock_find_port.assert_called_once_with( network_id='network_id', name_or_id='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, 'subnet_id')) mock_create_hm_port.assert_called_with('network_id', 'subnet_id', 'project_id') def _update_external_ids_member_status(self, lb, member_id, member_status): status = constants.ONLINE if member_status == 'offline': status = constants.OFFLINE elif member_status == 'error': status = constants.ERROR try: existing_member_status = lb.external_ids[ ovn_const.OVN_MEMBER_STATUS_KEY] member_statuses = jsonutils.loads(existing_member_status) except Exception: member_statuses = {} member_statuses[member_id] = status lb.external_ids[ ovn_const.OVN_MEMBER_STATUS_KEY] = jsonutils.dumps( member_statuses) def _add_member(self, lb, subnet, port, pool_id=None, ip=None): if not pool_id: pool_id = self.pool_id if not ip: fake_port = fakes.FakePort.create_one_port( attrs={'allowed_address_pairs': ''}) ip = fake_port['fixed_ips'][0]['ip_address'] member = {'id': uuidutils.generate_uuid(), 'address': ip, 'protocol_port': port, 'subnet_id': subnet['id'], 'pool_id': pool_id, 'admin_state_up': True, 'old_admin_state_up': True} member_line = ( 'member_%s_%s:%s_%s' % (member['id'], member['address'], member['protocol_port'], member['subnet_id'])) pool_key = 'pool_%s' % pool_id existing_members = lb.external_ids[pool_key] existing_member_status = lb.external_ids[ ovn_const.OVN_MEMBER_STATUS_KEY] try: member_statuses = jsonutils.loads(existing_member_status) except Exception: member_statuses = {} if existing_members: existing_members = ','.join([existing_members, member_line]) lb.external_ids[pool_key] = existing_members member_statuses[member['id']] = constants.ONLINE lb.external_ids[ ovn_const.OVN_MEMBER_STATUS_KEY] = jsonutils.dumps( member_statuses) else: lb.external_ids[pool_key] = member_line member_status 
= '{"%s": "%s"}' % (member['id'], constants.ONLINE) lb.external_ids[ ovn_const.OVN_MEMBER_STATUS_KEY] = member_status return member def test__create_hm_port(self): expected_dict = { 'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), 'network_id': self.vip_dict['vip_network_id'], 'fixed_ips': [{'subnet_id': self.vip_dict['vip_subnet_id']}], 'admin_state_up': True, 'port_security_enabled': False, 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), 'project_id': self.project_id } with mock.patch.object(clients, 'get_neutron_client') as net_cli: hm_port = self.helper._create_hm_port( self.vip_dict['vip_network_id'], self.vip_dict['vip_subnet_id'], self.project_id) expected_call = [ mock.call().create_port(**expected_dict)] net_cli.assert_has_calls(expected_call) self.assertIsNotNone(hm_port) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test__create_hm_port_neutron_client_exception( self, net_cli): net_cli.return_value.create_port.side_effect = [ openstack.exceptions.HttpException] net_cli.return_value.ports.return_value = [] expected_dict = { 'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), 'network_id': self.vip_dict['vip_network_id'], 'fixed_ips': [{'subnet_id': self.vip_dict['vip_subnet_id']}], 'admin_state_up': True, 'port_security_enabled': False, 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), 'project_id': self.project_id } hm_port = self.helper._create_hm_port( self.vip_dict['vip_network_id'], self.vip_dict['vip_subnet_id'], self.project_id) expected_call = [ mock.call(), mock.call().create_port(**expected_dict), mock.call(), mock.call().ports( name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']))] net_cli.assert_has_calls(expected_call) self.assertIsNone(hm_port) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port') def test__create_hm_port_neutron_client_exception_clean_up_hm_port( self, del_hm_port, net_cli): net_cli.return_value.create_port.side_effect = [ openstack.exceptions.HttpException] net_cli.return_value.ports.return_value = [ Port(name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), id='fake_uuid')] expected_dict = { 'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), 'network_id': self.vip_dict['vip_network_id'], 'fixed_ips': [{ 'subnet_id': self.vip_dict['vip_subnet_id']}], 'admin_state_up': True, 'port_security_enabled': False, 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), 'project_id': self.project_id } hm_port = self.helper._create_hm_port( self.vip_dict['vip_network_id'], self.vip_dict['vip_subnet_id'], self.project_id) expected_call = [ mock.call(), mock.call().create_port(**expected_dict)] net_cli.assert_has_calls(expected_call) del_hm_port.assert_called_once_with(self.vip_dict['vip_subnet_id']) self.assertIsNone(hm_port) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test__clean_up_hm_port(self, del_port, net_cli): net_cli.return_value.ports.return_value = [ Port(name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), id='fake_uuid', 
fixed_ips=[{'subnet_id': 'another_subnet_id', 'ip_address': '10.1.2.3'}, {'subnet_id': self.vip_dict['vip_subnet_id'], 'ip_address': '10.0.0.3'}])] self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id']) expected_call = [ mock.call(), mock.call().ports( name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']))] net_cli.assert_has_calls(expected_call) del_port.assert_called_once_with('fake_uuid') @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test__clean_up_hm_port_in_use(self, del_port, net_cli): net_cli.return_value.ports.return_value = [ Port(name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']), id='fake_uuid', fixed_ips=[{'subnet_id': 'another_subnet_id', 'ip_address': '10.1.2.3'}, {'subnet_id': self.vip_dict['vip_subnet_id'], 'ip_address': '10.0.0.3'}])] fake_lb_unrelated = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'ip_port_mappings': {'10.1.2.4': 'fake_member_lgp:10.1.2.3'}}) fake_lb_hm_port_in_use = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'ip_port_mappings': {'10.1.2.4': 'fake_member_lgp:10.1.2.3', '10.0.0.4': 'fake_member_lgp:10.0.0.3'}}) self.helper.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [fake_lb_unrelated, fake_lb_hm_port_in_use] self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id']) expected_call = [ mock.call(), mock.call().ports( name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']))] net_cli.assert_has_calls(expected_call) del_port.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port') def test__clean_up_hm_port_not_found(self, del_port, net_cli): net_cli.return_value.ports.return_value = [] self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id']) expected_call = [ mock.call(), mock.call().ports( name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX, self.vip_dict['vip_subnet_id']))] net_cli.assert_has_calls(expected_call) del_port.assert_not_called() def test_hm_update_status_offline(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) status = self._test_hm_update_status( [self.ovn_hm_lb], member['id'], member['address'], '8080', 'offline') self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) def test_hm_update_status_offline_two_lbs_affected(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() ovn_hm_lb_2 = copy.deepcopy(self.ovn_hm_lb) ovn_hm_lb_2.uuid = uuidutils.generate_uuid() member = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) member_2 = self._add_member( ovn_hm_lb_2, fake_subnet, 8080, ip=member['address']) info = { 'ovn_lbs': [self.ovn_hm_lb, ovn_hm_lb_2], 'ip': member['address'], 'logical_port': 'a-logical-port', 'src_ip': '10.22.33.4', 'port': '8080', 'protocol': self.ovn_hm_lb.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE} self._update_external_ids_member_status(self.ovn_hm_lb, member['id'], 'error') self._update_external_ids_member_status(ovn_hm_lb_2, 
member_2['id'], 'error') status = self.helper.hm_update_event(info) self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.assertEqual(status['pools'][1]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][1]['operating_status'], constants.ERROR) self.assertEqual(status['members'][1]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][1]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][1]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][1]['operating_status'], constants.ERROR) def test_hm_update_status_offline_lb_pool_offline(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) status = self._test_hm_update_status( [self.ovn_hm_lb], member['id'], member['address'], '8080', 'offline') self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) def test_hm_update_status_online(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) status = self._test_hm_update_status( [self.ovn_hm_lb], member['id'], member['address'], '8080', 'online') self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) def test_hm_update_status_online_lb_pool_offline(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) status = self._test_hm_update_status( [self.ovn_hm_lb], member['id'], member['address'], '8080', 'online') self.assertEqual(status['pools'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['members'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['loadbalancers'][0]['provisioning_status'], constants.ACTIVE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) def test_hm_update_status_offline_two_members_diff_lbs_port(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() ovn_hm_lb2 = mock.MagicMock() ovn_hm_lb2.uuid = uuidutils.generate_uuid() listener_id_2 = uuidutils.generate_uuid() pool_id_2 = uuidutils.generate_uuid() 
ovn_hm_lb2.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.98', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.98', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_hm_port_2', 'enabled': True, 'pool_%s' % pool_id_2: [], 'listener_%s' % listener_id_2: '8081:pool_%s' % pool_id_2, ovn_const.OVN_MEMBER_STATUS_KEY: '{}'} member_lb1 = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) ip_member = member_lb1['address'] member_lb2 = self._add_member(ovn_hm_lb2, fake_subnet, 8081, pool_id=pool_id_2, ip=ip_member) # member lb2 ERROR, so lb2 operating_status should be ERROR # for Pool and Loadbalancer, but lb1 should keep ONLINE self._update_external_ids_member_status(ovn_hm_lb2, member_lb2['id'], 'error') info = { 'ovn_lbs': [self.ovn_hm_lb, ovn_hm_lb2], 'ip': ip_member, 'logical_port': 'a-logical-port', 'src_ip': '10.22.33.4', 'port': '8081', 'protocol': ovn_hm_lb2.protocol, 'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE} status = self.helper.hm_update_event(info) self.assertEqual(status['members'][0]['operating_status'], constants.ERROR) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) self.assertEqual(status['members'][0]['id'], member_lb2['id']) def test_hm_update_status_offline_two_members(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member_1 = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) ip_1 = member_1['address'] member_2 = self._add_member(self.ovn_hm_lb, fake_subnet, 8081) ip_2 = member_2['address'] # This is the Octavia API version fake_member = fakes.FakeMember( uuid=member_2['id'], admin_state_up=True, name='member_2', project_id=self.project_id, address=ip_2, protocol_port=8081) # Second member ONLINE, operating_status should be DEGRADED # for Pool and Loadbalancer fake_member.operating_status = constants.ONLINE self.octavia_driver_lib.get_member.return_value = fake_member status = self._test_hm_update_status( [self.ovn_hm_lb], member_1['id'], ip_1, '8080', 'offline') self.assertEqual(status['members'][0]['operating_status'], constants.ERROR) self.assertEqual(status['pools'][0]['operating_status'], constants.DEGRADED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.DEGRADED) # Second member ERROR, operating_status should be ERROR # for Pool and Loadbalancer fake_member.operating_status = constants.ERROR self.octavia_driver_lib.get_member.return_value = fake_member status = self._test_hm_update_status( [self.ovn_hm_lb], member_2['id'], ip_2, '8081', 'offline') self.assertEqual(status['members'][0]['operating_status'], constants.ERROR) self.assertEqual(status['pools'][0]['operating_status'], constants.ERROR) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ERROR) def test_hm_update_status_online_two_members(self): fake_subnet = fakes.FakeSubnet.create_one_subnet() member_1 = self._add_member(self.ovn_hm_lb, fake_subnet, 8080) member_2 = self._add_member(self.ovn_hm_lb, fake_subnet, 8081) ip_2 = member_2['address'] # This is the Octavia API version fake_member = fakes.FakeMember( uuid=member_2['id'], admin_state_up=True, name='member_2', project_id=self.project_id, address=ip_2, protocol_port=8081) # Second member ERROR, operating_status should be DEGRADED # for Pool and Loadbalancer status = self._test_hm_update_status( [self.ovn_hm_lb], member_2['id'], ip_2, '8081', 'offline') member_status = { ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s", "%s": "%s"}' % (member_1['id'], constants.ONLINE, 
member_2['id'], constants.ERROR,)} self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_hm_lb.uuid, ('external_ids', member_status)) self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['operating_status'], constants.DEGRADED) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.DEGRADED) # Second member ONLINE, operating_status should be ONLINE # for Pool and Loadbalancer fake_member.operating_status = constants.ONLINE self.octavia_driver_lib.get_member.return_value = fake_member status = self._test_hm_update_status( [self.ovn_hm_lb], member_2['id'], ip_2, '8081', 'online') self.assertEqual(status['members'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['pools'][0]['operating_status'], constants.ONLINE) self.assertEqual(status['loadbalancers'][0]['operating_status'], constants.ONLINE) @mock.patch.object(ovn_helper.OvnProviderHelper, '_frame_vip_ips') def test_refresh_lb_vips_returns_empty_when_synced(self, mock_frame_vip_ips): self.ovn_lb.vips = {'vip1:port1': 'ip1:port1,ip2:port1'} lb_external_ids = {'external_id1': 'val1'} mock_frame_vip_ips.return_value = {'vip1:port1': 'ip1:port1,ip2:port1'} result = self.helper._refresh_lb_vips( self.ovn_lb, lb_external_ids, is_sync=True) self.assertEqual([], result) @mock.patch.object(ovn_helper.OvnProviderHelper, '_frame_vip_ips') def test_refresh_lb_vips_returns_db_operations_when_not_synced( self, mock_frame_vip_ips): self.ovn_lb.vips = {'vip1:port1': 'ip1:port1,ip2:port1'} lb_external_ids = {'external_id1': 'val1'} mock_frame_vip_ips.return_value = {'vip1:port1': 'ip1:port1,ip2:port1', 'fip1:port1': 'ip1:port1,ip2:port1'} self.helper._refresh_lb_vips( self.ovn_lb, lb_external_ids, is_sync=True) self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'vips' ) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('vips', {'vip1:port1': 'ip1:port1,ip2:port1', 'fip1:port1': 'ip1:port1,ip2:port1'}) ) @mock.patch.object(ovn_helper.OvnProviderHelper, '_frame_vip_ips') def test_refresh_lb_vips_returns_db_operations_when_is_sync_false( self, mock_frame_vip_ips): self.ovn_lb.vips = {'vip1:port1': 'ip1:port1,ip2:port1'} lb_external_ids = {'external_id1': 'val1'} mock_frame_vip_ips.return_value = {'vip1:port1': 'ip1:port1,ip2:port1'} self.helper._refresh_lb_vips( self.ovn_lb, lb_external_ids) # Assert that the method returns the db_clear and db_set operations self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, 'vips' ) self.helper.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', self.ovn_lb.uuid, ('vips', {'vip1:port1': 'ip1:port1,ip2:port1'}) ) def test_update_ip_port_mappings_add(self): # Setup mock OVN load balancer ovn_lb = mock.Mock() ovn_lb.uuid = 'test-lb-uuid' ovn_lb.external_ids = {} # Call the method with delete=False self.helper._update_ip_port_mappings( ovn_lb, '10.0.0.1', 'port1', '192.168.0.1', 'pool1', delete=False ) # Assert that lb_add_ip_port_mapping was called self.helper.ovn_nbdb_api.lb_add_ip_port_mapping\ .assert_called_once_with( 'test-lb-uuid', '10.0.0.1', 'port1', '192.168.0.1', ) def test_update_ip_port_mappings_delete_minimal(self): ovn_lb = mock.Mock() ovn_lb.uuid = 'test-lb-uuid' ovn_lb.external_ids = {} # Patch _extract_member_info to return no other members self.helper._extract_member_info = mock.Mock(return_value=[]) # Also patch ovn_nbdb_api call 
self.helper.ovn_nbdb_api.lb_del_ip_port_mapping = mock.Mock() self.helper.ovn_nbdb_api.lb_add_ip_port_mapping = mock.Mock() self.helper._update_ip_port_mappings( ovn_lb, backend_ip='10.0.0.1', port_name='dummy-port', src_ip='192.168.0.1', pool_key='pool-test', delete=True ) self.helper.ovn_nbdb_api.\ lb_del_ip_port_mapping.\ assert_called_once_with( 'test-lb-uuid', '10.0.0.1' ) def test_update_ip_port_mappings_delete_with_other_members_present(self): ovn_lb = mock.Mock() ovn_lb.uuid = 'test-lb-uuid' ovn_lb.external_ids = { "pool_A": "member_memberA_10.0.0.1:80_subnetA", "pool_B": "member_memberB_10.0.0.1:80_subnetA", "neutron:member_statuses": '{"memberB": "ONLINE"}' } self.helper.ovn_nbdb_api.lb_del_ip_port_mapping = mock.Mock() self.helper.ovn_nbdb_api.lb_add_ip_port_mapping = mock.Mock() # Call the method under test self.helper._update_ip_port_mappings( ovn_lb, backend_ip='10.0.0.1', port_name='dummy-port', src_ip='192.168.0.1', pool_key='pool_A', delete=True ) # Should not call delete because memberB is ONLINE and shares the IP self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.assert_not_called() self.helper.ovn_nbdb_api.lb_add_ip_port_mapping.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider/tests/unit/test_maintenance.py0000664000175100017510000002017615033037524030243 0ustar00mylesmyles # Copyright 2023 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from unittest import mock from futurist import periodics from neutron_lib import constants as n_const from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider.common import constants as ovn_const from ovn_octavia_provider import maintenance from ovn_octavia_provider.tests.unit import base as ovn_base from ovn_octavia_provider.tests.unit import fakes class TestDBInconsistenciesPeriodics(ovn_base.TestOvnOctaviaBase): def setUp(self): ovn_conf.register_opts() super(TestDBInconsistenciesPeriodics, self).setUp() self.maint = maintenance.DBInconsistenciesPeriodics() self.ovn_nbdb_api = mock.patch.object(self.maint, 'ovn_nbdb_api') self.ovn_nbdb_api.start() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_change_device_owner_lb_hm_ports(self, net_cli): ovn_lb_hm_ports = [ fakes.FakePort.create_one_port( attrs={ 'id': 'foo', 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'name': 'ovn-metadata-foo'}), fakes.FakePort.create_one_port( attrs={ 'id': 'foo1', 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'name': 'ovn-lb-hm-foo1'}), fakes.FakePort.create_one_port( attrs={ 'id': 'foo2', 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'name': 'ovn-lb-hm-foo2'})] net_cli.return_value.ports.return_value = ovn_lb_hm_ports self.assertRaises(periodics.NeverAgain, self.maint.change_device_owner_lb_hm_ports) expected_dict_1 = { 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': 'ovn-lb-hm-foo1', } expected_dict_2 = { 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': 'ovn-lb-hm-foo2', } expected_call = [ mock.call(), mock.call().ports(device_owner=n_const.DEVICE_OWNER_DISTRIBUTED), mock.call().update_port('foo1', **expected_dict_1), mock.call().update_port('foo2', **expected_dict_2)] net_cli.assert_has_calls(expected_call) self.maint.ovn_nbdb_api.db_find_rows.assert_called_once_with( "Logical_Switch_Port", ("name", "=", 'foo1')) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_change_device_owner_lb_hm_ports_neutron_version_doesnt_match( self, net_cli): ovn_lb_hm_ports = [ fakes.FakePort.create_one_port( attrs={ 'id': 'foo', 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'name': 'ovn-metadata-foo'}), fakes.FakePort.create_one_port( attrs={ 'id': 'foo1', 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'name': 'ovn-lb-hm-foo1'}), fakes.FakePort.create_one_port( attrs={ 'id': 'foo2', 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'name': 'ovn-lb-hm-foo2'})] net_cli.return_value.ports.return_value = ovn_lb_hm_ports self.maint.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [ fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'id': 'uuid-foo', 'type': 'foo'})] self.maint.change_device_owner_lb_hm_ports() expected_dict_change = { 'device_owner': ovn_const.OVN_LB_HM_PORT_DISTRIBUTED, 'device_id': 'ovn-lb-hm-foo1', } expected_dict_rollback = { 'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED, 'device_id': '', } expected_call = [ mock.call(), mock.call().ports(device_owner=n_const.DEVICE_OWNER_DISTRIBUTED), mock.call().update_port('foo1', **expected_dict_change), mock.call().update_port('foo1', **expected_dict_rollback)] net_cli.assert_has_calls(expected_call) self.maint.ovn_nbdb_api.db_find_rows.assert_called_once_with( "Logical_Switch_Port", ("name", "=", 'foo1')) @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_change_device_owner_lb_hm_ports_no_ports_to_change(self, net_cli): ovn_lb_hm_ports = [] net_cli.return_value.ports.return_value = 
ovn_lb_hm_ports self.assertRaises(periodics.NeverAgain, self.maint.change_device_owner_lb_hm_ports) expected_call = [ mock.call(), mock.call().ports(device_owner=n_const.DEVICE_OWNER_DISTRIBUTED), ] net_cli.assert_has_calls(expected_call) self.maint.ovn_nbdb_api.db_find_rows.assert_not_called() def test_format_ip_port_mappings_ipv6_no_ip_port_mappings_to_change(self): self.maint.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = [] self.assertRaises(periodics.NeverAgain, self.maint.format_ip_port_mappings_ipv6) self.maint.ovn_nbdb_api.db_clear.assert_not_called() self.maint.ovn_nbdb_api.db_set.assert_not_called() @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client') def test_format_ip_port_mappings_ipv6(self, net_cli): ovn_lbs = [ fakes.FakeOVNLB.create_one_lb( attrs={ 'uuid': 'foo1', 'ip_port_mappings': { 'fda2:918e:5869:0:f816:3eff:fe64:adf7': 'f2b97caf-da62-4db9-91da-bc11f2ac3934:' 'fda2:918e:5869:0:f816:3eff:fe81:61d0', 'fda2:918e:5869:0:f816:3eff:fe64:adf8': 'f2b97caf-da62-4db9-91da-bc11f2ac3935:' 'fda2:918e:5869:0:f816:3eff:fe81:61d0'}}), fakes.FakeOVNLB.create_one_lb( attrs={ 'uuid': 'foo2', 'ip_port_mappings': { '192.168.1.50': 'f2b97caf-da62-4db9-91da-bc11f2ac3934:' '192.168.1.3'}}), fakes.FakeOVNLB.create_one_lb( attrs={ 'uuid': 'foo3', 'ip_port_mappings': { '[fda2:918e:5869:0:f816:3eff:fe64:adf7]': 'f2b97caf-da62-4db9-91da-bc11f2ac3934:' '[fda2:918e:5869:0:f816:3eff:fe81:61d0]'}}), ] self.maint.ovn_nbdb_api.db_find_rows.return_value.\ execute.return_value = ovn_lbs self.assertRaises(periodics.NeverAgain, self.maint.format_ip_port_mappings_ipv6) mapping1 = { '[fda2:918e:5869:0:f816:3eff:fe64:adf7]': 'f2b97caf-da62-4db9-91da-bc11f2ac3934:' '[fda2:918e:5869:0:f816:3eff:fe81:61d0]', '[fda2:918e:5869:0:f816:3eff:fe64:adf8]': 'f2b97caf-da62-4db9-91da-bc11f2ac3935:' '[fda2:918e:5869:0:f816:3eff:fe81:61d0]'} self.maint.ovn_nbdb_api.db_clear.assert_called_once_with( 'Load_Balancer', 'foo1', 'ip_port_mappings') self.maint.ovn_nbdb_api.db_set.assert_called_once_with( 'Load_Balancer', 'foo1', ('ip_port_mappings', mapping1)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5149846 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/0000775000175100017510000000000015033037526023715 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/PKG-INFO0000644000175100017510000000501015033037524025002 0ustar00mylesmylesMetadata-Version: 2.2 Name: ovn-octavia-provider Version: 8.1.0.dev15 Summary: OpenStack Octavia integration with OVN Home-page: https://docs.openstack.org/ovn-octavia-provider/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.9 License-File: LICENSE Requires-Dist: keystoneauth1>=3.14.0 Requires-Dist: netaddr>=0.7.18 Requires-Dist: neutron-lib>=3.8.0 Requires-Dist: openstacksdk>=0.103.0 
Requires-Dist: oslo.config>=8.0.0 Requires-Dist: oslo.log>=4.3.0 Requires-Dist: oslo.messaging>=12.4.0 Requires-Dist: oslo.serialization>=2.28.1 Requires-Dist: oslo.utils>=4.5.0 Requires-Dist: ovs>=2.10.0 Requires-Dist: ovsdbapp>=2.1.0 Requires-Dist: pbr>=4.0.0 Requires-Dist: SQLAlchemy>=1.4.23 Requires-Dist: tenacity>=6.0.0 Requires-Dist: octavia-lib>=2.2.0 Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: requires-dist Dynamic: requires-python Dynamic: summary =================================================================== ovn-octavia-provider - OVN Provider driver for Octavia LoadBalancer =================================================================== OVN provides virtual networking for Open vSwitch and is a component of the Open vSwitch project. This project provides integration between OpenStack Octavia and OVN. * Free software: Apache license * Source: https://opendev.org/openstack/ovn-octavia-provider * Bugs: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider * Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss * IRC: #openstack-neutron on OFTC. * Docs: https://docs.openstack.org/ovn-octavia-provider/latest Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ovn-octavia-provider.svg :target: https://governance.openstack.org/tc/reference/tags/index.html * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/ovn-octavia-provider ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/SOURCES.txt0000664000175100017510000001204415033037524025600 0ustar00mylesmyles.coveragerc .pre-commit-config.yaml .pylintrc .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/local.conf.sample devstack/plugin.sh devstack/settings doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/pdf-index.rst doc/source/_static/.placeholder doc/source/admin/driver.rst doc/source/admin/index.rst doc/source/configuration/config.rst doc/source/configuration/index.rst doc/source/contributor/index.rst doc/source/contributor/loadbalancer.rst etc/octavia/.placeholder etc/octavia/conf.d/.placeholder etc/oslo-config-generator/ovn.conf ovn_octavia_provider/__init__.py ovn_octavia_provider/agent.py ovn_octavia_provider/driver.py ovn_octavia_provider/event.py ovn_octavia_provider/helper.py ovn_octavia_provider/i18n.py ovn_octavia_provider/maintenance.py ovn_octavia_provider.egg-info/PKG-INFO ovn_octavia_provider.egg-info/SOURCES.txt ovn_octavia_provider.egg-info/dependency_links.txt ovn_octavia_provider.egg-info/entry_points.txt ovn_octavia_provider.egg-info/not-zip-safe ovn_octavia_provider.egg-info/pbr.json ovn_octavia_provider.egg-info/requires.txt ovn_octavia_provider.egg-info/top_level.txt ovn_octavia_provider/cmd/__init__.py ovn_octavia_provider/cmd/octavia_ovn_db_sync_util.py ovn_octavia_provider/common/clients.py ovn_octavia_provider/common/config.py ovn_octavia_provider/common/constants.py ovn_octavia_provider/common/exceptions.py ovn_octavia_provider/common/utils.py ovn_octavia_provider/hacking/__init__.py ovn_octavia_provider/hacking/checks.py ovn_octavia_provider/ovsdb/impl_idl_ovn.py ovn_octavia_provider/ovsdb/ovsdb_monitor.py ovn_octavia_provider/tests/__init__.py 
ovn_octavia_provider/tests/functional/__init__.py ovn_octavia_provider/tests/functional/base.py ovn_octavia_provider/tests/functional/requirements.txt ovn_octavia_provider/tests/functional/test_agent.py ovn_octavia_provider/tests/functional/test_driver.py ovn_octavia_provider/tests/functional/test_integration.py ovn_octavia_provider/tests/unit/__init__.py ovn_octavia_provider/tests/unit/base.py ovn_octavia_provider/tests/unit/fakes.py ovn_octavia_provider/tests/unit/test_agent.py ovn_octavia_provider/tests/unit/test_cmd.py ovn_octavia_provider/tests/unit/test_driver.py ovn_octavia_provider/tests/unit/test_hacking.py ovn_octavia_provider/tests/unit/test_helper.py ovn_octavia_provider/tests/unit/test_maintenance.py ovn_octavia_provider/tests/unit/common/__init__.py ovn_octavia_provider/tests/unit/common/test_clients.py ovn_octavia_provider/tests/unit/common/test_utils.py ovn_octavia_provider/tests/unit/hacking/__init__.py ovn_octavia_provider/tests/unit/hacking/test_checks.py ovn_octavia_provider/tests/unit/ovsdb/__init__.py ovn_octavia_provider/tests/unit/ovsdb/test_impl_idl_ovn.py ovn_octavia_provider/tests/unit/ovsdb/test_ovsdb_monitor.py ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema ovn_octavia_provider/tests/unit/schemas/ovn-sb.ovsschema playbooks/configure_functional_job.yaml playbooks/post_functional_job.yaml playbooks/run_functional_job.yaml releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml releasenotes/notes/drop-python-3-6-and-3-7-e890961ed94c146e.yaml releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml releasenotes/notes/remove-py38-4240ec2f24969054.yaml releasenotes/notes/session-persistence-b409428a8907f542.yaml releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/2025.1.rst releasenotes/source/README.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder roles/configure_functional_tests/README.rst roles/configure_functional_tests/defaults/main.yaml roles/configure_functional_tests/tasks/main.yaml roles/fetch_journal_log/README.rst roles/fetch_journal_log/defaults/main.yaml roles/fetch_journal_log/tasks/main.yaml roles/setup_logdir/README.rst roles/setup_logdir/defaults/main.yaml roles/setup_logdir/tasks/main.yaml tools/check_unit_test_structure.sh tools/coding-checks.sh tools/generate_config_file_samples.sh tools/pip_install_src_modules.sh zuul.d/base.yaml zuul.d/project.yaml././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 
ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/dependency_links.txt0000664000175100017510000000000115033037524027761 0ustar00mylesmyles ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/entry_points.txt0000664000175100017510000000054415033037524027214 0ustar00mylesmyles[console_scripts] octavia-ovn-db-sync-util = ovn_octavia_provider.cmd.octavia_ovn_db_sync_util:main [octavia.api.drivers] ovn = ovn_octavia_provider.driver:OvnProviderDriver [octavia.driver_agent.provider_agents] ovn = ovn_octavia_provider.agent:OvnProviderAgent [oslo.config.opts] octavia.api.drivers.ovn = ovn_octavia_provider.common.config:list_opts ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/not-zip-safe0000664000175100017510000000000115033037524026141 0ustar00mylesmyles ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/pbr.json0000664000175100017510000000005715033037524025373 0ustar00mylesmyles{"git_version": "e9707b8", "is_release": false}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/requires.txt0000664000175100017510000000042315033037524026312 0ustar00mylesmyleskeystoneauth1>=3.14.0 netaddr>=0.7.18 neutron-lib>=3.8.0 openstacksdk>=0.103.0 oslo.config>=8.0.0 oslo.log>=4.3.0 oslo.messaging>=12.4.0 oslo.serialization>=2.28.1 oslo.utils>=4.5.0 ovs>=2.10.0 ovsdbapp>=2.1.0 pbr>=4.0.0 SQLAlchemy>=1.4.23 tenacity>=6.0.0 octavia-lib>=2.2.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/ovn_octavia_provider.egg-info/top_level.txt0000664000175100017510000000002515033037524026442 0ustar00mylesmylesovn_octavia_provider ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5169845 ovn_octavia_provider-8.1.0.dev15/playbooks/0000775000175100017510000000000015033037526020004 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/playbooks/configure_functional_job.yaml0000664000175100017510000000011215033037524025715 0ustar00mylesmyles- hosts: all roles: - setup_logdir - configure_functional_tests ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/playbooks/post_functional_job.yaml0000664000175100017510000000014015033037524024722 0ustar00mylesmyles- hosts: all roles: - fetch_journal_log - fetch-tox-output - fetch-subunit-output ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/playbooks/run_functional_job.yaml0000664000175100017510000000023015033037524024541 0ustar00mylesmyles- hosts: all roles: - role: bindep bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - ensure-tox - tox ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5119846 ovn_octavia_provider-8.1.0.dev15/releasenotes/0000775000175100017510000000000015033037526020472 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5179846 
ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/0000775000175100017510000000000015033037526021622 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml0000664000175100017510000000074715033037524027363 0ustar00mylesmyles--- features: - | The OVN Octavia provider driver now supports health monitoring. TCP and UDP Connect health monitors are now supported by the provider driver, when the underlying OVN version supports them. The health monitor uses the OVN distributed DHCP port as the source IP for messages by default; if one doesn't exist, then a port will be created on each given subnet. The list of member ports to monitor is updated whenever one is created or deleted. ././@PaxHeader0000000000000000000000000000021300000000000010211 xustar00117 path=ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml 22 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d20000664000175100017510000000023615033037524032166 0ustar00mylesmyles--- fixes: - | [`bug 2110488 `_] Fixed wrong endpoint information in Neutron client configuration. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml0000664000175100017510000000012615033037524030360 0ustar00mylesmyles--- features: - | Add support for the SCTP protocol in the OVN provider driver. ././@PaxHeader0000000000000000000000000000022500000000000010214 xustar00127 path=ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml 22 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/adjust-and-deprecate-neutron-config-options-50ed0000664000175100017510000000217115033037524032707 0ustar00mylesmyles--- upgrade: - | Authentication settings for Neutron should be added directly to the [neutron] section of the configuration now. The exact settings depend on the `auth_type` used. Refer to https://docs.openstack.org/keystoneauth/latest/plugin-options.html for a list of possible options. An example [neutron] section is sketched after these notes. deprecations: - | As part of the effort to replace the deprecated `python-neutronclient` package in Octavia the following options in the [neutron] section of the Octavia configuration file have been marked as deprecated for removal: `endpoint` is replaced by the `endpoint_override` option, `endpoint_type` is replaced by the `valid_interfaces` option, and `ca_certificates_file` is replaced by the `cafile` option. In a future release `ovn-octavia-provider` will no longer take the authentication settings from the [service_auth] section as a fallback. It will require them to be in the [neutron] section. other: - | Replaced code that uses the deprecated `python-neutronclient` library with code that uses `openstacksdk` and removed `python-neutronclient` as a dependency. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/drop-python-3-6-and-3-7-e890961ed94c146e.yaml0000664000175100017510000000020115033037524030503 0ustar00mylesmyles--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. 
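To make the upgrade note above concrete, the following is a minimal sketch of a [neutron] section, assuming a typical deployment. The option names `endpoint_override`, `valid_interfaces` and `cafile` are the replacements listed in the deprecation note; the remaining keystoneauth values (auth_type, auth_url, username, password, project and domain names) are illustrative placeholders and the exact set depends on the `auth_type` chosen::

    [neutron]
    # Replacements for the deprecated endpoint, endpoint_type and
    # ca_certificates_file options.
    endpoint_override = http://controller:9696
    valid_interfaces = internal
    cafile = /etc/ssl/certs/ca-bundle.crt
    # Keystoneauth plugin options; the exact set depends on auth_type.
    auth_type = password
    auth_url = http://controller:5000
    username = octavia
    password = <service-password>
    project_name = service
    user_domain_name = Default
    project_domain_name = Default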
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml0000664000175100017510000000144615033037524027045 0ustar00mylesmyles--- issues: - | Load Balancer Health Checks for Floating IPs are not populated with the protocol port. Because of this, when a backend is detected in ERROR state, requests to the Floating IP are still distributed to the ERROR'ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). Example commands are sketched after these notes. fixes: - | [`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP; additional fields have been introduced in the external_ids to provide more accurate information about the entities affected by any change to the Floating IP or LB VIP.././@PaxHeader0000000000000000000000000000021100000000000010207 xustar00115 path=ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml 22 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de6577470000664000175100017510000000026315033037524031243 0ustar00mylesmyles--- fixes: - | [`bug 2072754 `_] Fixed maintenance task that was breaking IPv4 load balancers with health monitors. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml0000664000175100017510000000077115033037524032063 0ustar00mylesmyles--- fixes: - | A maintenance task process has been added to update the existing OVN LB HM ports to the newly defined behaviour. Specifically, the "device_owner" field needs to be updated from network:distributed to ovn-lb-hm:distributed. Additionally, the "device_id" will be populated during the update action. other: - | A maintenance task thread has been added to work on periodic and one-shot tasks; it also allows future changes to perform the needed upgrade actions. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml0000664000175100017510000000025115033037524030031 0ustar00mylesmyles--- upgrade: - | In order to support the new 'device_owner' for OVN Load Balancer Health Monitor ports, this version requires a Neutron version > 23.0.0rc2 ././@PaxHeader0000000000000000000000000000023000000000000010210 xustar00130 path=ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml 22 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-d0000664000175100017510000000036315033037524033125 0ustar00mylesmyles--- prelude: > OVN Octavia provider driver has been created from the networking-ovn repository. upgrade: - | OVN Octavia Provider driver registers under the same entry point. There is no action to be done from the operator side.
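As a rough illustration of the remediation described in the lbhc-fix-fip note above, recreating the Octavia Health Monitor makes the provider rebuild the associated OVN Load Balancer Health Check(s). The identifiers and monitor parameters below are placeholders; reuse the values of the original health monitor::

    # Remove the health monitor whose OVN LB Health Checks lack the port.
    openstack loadbalancer healthmonitor delete <healthmonitor-id>
    # Recreate it on the same pool with equivalent parameters; the OVN
    # provider recreates the Load Balancer Health Check entries.
    openstack loadbalancer healthmonitor create --type TCP --delay 5 \
        --timeout 3 --max-retries 3 <pool-id>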
././@PaxHeader0000000000000000000000000000023000000000000010210 xustar00130 path=ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml 22 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-40000664000175100017510000000023115033037524033122 0ustar00mylesmyles--- fixes: - | OVN Octavia provider driver now supports both TCP and UDP pool/listener protocols configured in the same Octavia Load Balancer. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/remove-py38-4240ec2f24969054.yaml0000664000175100017510000000016615033037524026603 0ustar00mylesmyles--- upgrade: - | Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924565.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/reno.cache0000664000175100017510000003300415033037525023551 0ustar00mylesmyles--- dates: - date: 1696417847 version: 5.0.0 - date: 1679488315 version: 4.0.0 - date: 1627996039 version: 1.1.0 - date: 1681994375 version: xena-em - date: 1710400686 version: 6.0.0.0rc1 - date: 1686577422 version: 2.1.1 - date: 1743591230 version: 8.0.0 - date: 1627386758 version: 0.1.3 - date: 1733853225 version: yoga-eol - date: 1592499421 version: 0.2.0 - date: 1598476205 version: 0.1.2 - date: 1695895613 version: 5.0.0.0rc2 - date: 1709727624 version: xena-eom - date: 1677757512 version: 4.0.0.0rc1 - date: 1694786736 version: 5.0.0.0rc1 - date: 1630429832 version: 1.1.1 - date: 1709727150 version: wallaby-eom - date: 1678785514 version: 1.3.0 - date: 1593423081 version: 0.1.1 - date: 1678785869 version: 2.1.0 - date: 1686234856 version: 3.1.1 - date: 1733853641 version: zed-eol - date: 1643733994 version: 0.4.1 - date: 1727866346 version: 7.0.0 - date: 1651072179 version: victoria-em - date: 1617985100 version: 0.4.0 - date: 1690535343 version: 2.2.0 - date: 1616758878 version: 1.0.0 - date: 1636977817 version: ussuri-em - date: 1667403903 version: 1.0.2 - date: 1686234776 version: 4.0.1 - date: 1733851683 version: wallaby-eol - date: 1714467263 version: zed-eom - date: 1722522234 version: 4.0.2 - date: 1664972285 version: 3.0.0 - date: 1679067378 version: 4.0.0.0rc2 - date: 1709726545 version: victoria-eom - date: 1585851408 version: 0.1.0 - date: 1733816265 version: victoria-eol - date: 1731580486 version: 2023.1-eom - date: 1646914404 version: 2.0.0.0rc1 - date: 1648641204 version: 2.0.0 - date: 1663339180 version: 3.0.0.0rc1 - date: 1681384551 version: 1.3.1 - date: 1741957763 version: 8.0.0.0rc1 - date: 1678796160 version: 3.1.0 - date: 1726150196 version: 7.0.0.0rc1 - date: 1674128944 version: 1.2.0 - date: 1739799700 version: 5.1.0 - date: 1707151613 version: yoga-eom - date: 1600964647 version: 0.3.0 - date: 1712142320 version: 6.0.0 - date: 1746002175 version: 2023.2-eol - date: 1733851664 version: xena-eol - date: 1659020711 version: 1.0.1 - date: 1705503647 version: ussuri-eol - date: 1709290708 version: 3.1.2 - date: 1602841151 version: 0.3.1 - date: 1667474322 version: wallaby-em file-contents: releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml: features: - 'The OVN Octavia provider drvier now supports health monitoring. TCP and UDP Connect health monitors are now supported by the provider driver, when the underlying OVN version supports them. 
The health monitor uses the OVN distributed DHCP port as the source IP for messages by default, if one doesn''t exist then a port will be created on each given subnet. The list of member ports to monitor is updated whenever one is created or deleted. ' releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml: fixes: - '[`bug 2110488 `_] Fixed wrong endpoint information in Neutron client configuration. ' releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml: features: - 'Add support for the SCTP protocol in the OVN provider driver. ' releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml: deprecations: - 'As part of the effort to replace the deprecated `python-neutronclient` package in Octavia the following options in the [neutron] section of the Octavia configuration file have been marked as deprecated for removal: `endpoint` is replaced by the `endpoint_override` option, `endpoint_type` is replaced by the `valid_interfaces` option, and `ca_certificates_file` is replaced by the `cafile` option. In a future release `ovn-octavia-provider` will no longer take the authentication settings from the [service_auth] section as a fallback. It will require them to be in the [neutron] section. ' other: - 'Replaced code that uses the deprecated `python-neutronclient` library with code that uses `openstacksdk` and removed `python-neutronclient` as a dependency. ' upgrade: - 'Authentication settings for Neutron should be added directly to the [neutron] section of the configuration now. The exact settings depend on the `auth_type` used. Refer to https://docs.openstack.org/keystoneauth/latest/plugin-options.html for a list of possible options. ' releasenotes/notes/drop-python-3-6-and-3-7-e890961ed94c146e.yaml: upgrade: - 'Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. ' releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml: fixes: - '[`bug 1997418 `_] Added the protocol port to the Load Balancer Health Check associated with the Floating IP, additional fields have been introduced to the external_ids to provide more accuracy information about the entities affected by any change over the Floating IP or LB VIP.' issues: - 'Load Balancer Health Check for Floating IPs are not populated with the protocol port. At this way, when a backend is detected on ERROR state requests to the Floating IP are still distribute to the ERROR''ed members. In order to fix the existing Load Balancer Health Checks it is required to recreate the entire Octavia Health Monitor, which will recreate the associated OVN Load Balancer Health Check(s). ' releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml: fixes: - '[`bug 2072754 `_] Fixed maintenance task that was breaking IPv4 load balancers with health monitors. ' releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml: fixes: - 'A maintenance task process has been added to update the existing OVN LB HM ports to the new behaviour defined. Specifically, the "device_owner" field needs to be updated from network:distributed to ovn-lb-hm:distributed. Additionally, the "device_id" will be populated during update action. ' other: - 'A maintenance task thread has been added to work on periodic and one-shot tasks that also allows the future changes to perform the needed upgrades actions. 
' releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml: upgrade: - 'In order to support the new ''device_owner'' for OVN Load Balancer Health Monitor ports this version requires a Neutron version > 23.0.0rc2 ' releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml: prelude: 'OVN Octavia provider driver has been created from the networking-ovn repository. ' upgrade: - 'OVN Octavia Provider driver registers under the same entry point. There is no action to be done from operator side. ' releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml: fixes: - 'OVN Octavia provider driver now supports both TCP and UDP pool/listener protocols configured in the same Octavia Load Balancer. ' releasenotes/notes/remove-py38-4240ec2f24969054.yaml: upgrade: - 'Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ' releasenotes/notes/session-persistence-b409428a8907f542.yaml: features: - 'Now the OVN Octavia provider uses the affinity_timeout option of OVN Load Balancers to support pools sessions persistence. It only supports the SOURCE_IP option type. If not timeout is set, by default 360 seconds is set if the session persistence is enabled. ' releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml: fixes: - 'Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. ' notes: - files: - - releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml - !!binary | N2ExN2NkMWFlMzExYTg4YTlkYjJlZTc1NDEzYjExNTEwMGRiZjAxMg== version: 8.0.0-10 - files: - - releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml - !!binary | YmY4Njg0NjIwNTcxOGY2NTI0NTdhZWMxOTQzZTRjNTMzMzRkZDJhNg== version: 0.1.3-24 - files: - - releasenotes/notes/new-repository-for-ovn-octavia-provider-driver-dd81c4414c529c4e.yaml - !!binary | MTg0ZjYyOWYxN2EwZGRiYTU1ZDUwMmM5NWYxNDkzOTMwZDU5OTY3Nw== - - releasenotes/notes/ovn-octavia-provider-driver-multiple-protocols-4a93e184b8f374c7.yaml - !!binary | MTUyNjBiNzQzOWQ2YjYwNTMwZjhlNzI4Njc0ZjA3NDcwYmMxNmM0Mg== version: 0.1.0 - files: - - releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml - !!binary | Mzg2MGNhMDE1OWY1MGJkNTMwYzIxOThhNTVjNTA3MjRkYjAyY2MwYw== version: 0.4.1-23 - files: - - releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml - !!binary | OWVlYTBiOWM1ZGUwYTdhMGM4NjA0ZDA4YmU0ZDM0MTY5NjViY2YyOQ== version: 1.0.2-25 - files: - - releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml - !!binary | NTllMDlmMDUzZTg5ZWVlNzg1OWUxZDM4NGNhOTU1NjFhNDVhZGNkZA== - - releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml - !!binary | MDg0MzBiNTM1ZWI1NmUzNzU4ZTYzZjJhOTdjN2UzYTM4Mzc2OWJlZQ== version: 1.0.1 - files: - - releasenotes/notes/add-sctp-support-bedfed905e1f5a58.yaml - !!binary | Yjc5NmY0ZWJlMDUyZWJhY2UyOWYyOWMwNmU4ODc2MzQyYTQ5ZjI2ZQ== version: 1.0.0 - files: - - releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml - !!binary | OGVlZjBiZWIxZDU1NGY1YmZiNzQyNjcyM2E4MzAwMzM5OTlmYmVjNQ== version: 1.3.1-12 - files: - - releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml - !!binary | MDg5NmQ1ZjRlYzc3ZmViYzkzOWMzZThiMzNkOTRmOTM0ZWRkZjgwNQ== version: 1.2.0 - files: - - releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml - !!binary | OGJiZDhmMWI2MmQ3ODExNDMzMDc5YmZmOGNlOTQwZmJkZDA0MWEwYw== 
version: 1.1.0 - files: - - releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml - !!binary | NDgwYmZjMzQ1NTc3MjBlMmVjY2I4YTlkZDQ2NDk0YWIxOGRjYWRhMA== version: 2.2.0 - files: - - releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml - !!binary | Njc1ZWE5YzM1ZWY3Mzc2NTgyOGUzZGI0YjYzNmYxMzQ2NWE3NjU5Ng== version: 2.0.0 - files: - - releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml - !!binary | NzI2ZmM0NTI4ODE5ZmEzMjU2OThhZjk3ZjE1YjRiMzViNDk4OGM3OA== version: 3.1.2 - files: - - releasenotes/notes/drop-python-3-6-and-3-7-e890961ed94c146e.yaml - !!binary | MTY5NzhkZTUyODZkZjBlYzU5MjNkY2NhMWIyZDIyOGFmMDgxMGU0YQ== version: 3.0.0 - files: - - releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml - !!binary | ZWNiOGRjMWJhNjQ4N2VlODE0ODUxM2ExZTcyODg4YjE2Nzk4MGRmZA== version: 2023.1-eom - files: - - releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml - !!binary | ZTYzOTViZDQ4NWM2YjA2MWU4ODRmZmRjMWM5YTMyY2M5NmY2NWQ3YQ== - - releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml - !!binary | MmY3MmJjODcwYjAxYmNhNmUxNjI0NjA3NDg4MTE0NWE1MjIzY2JhNQ== - - releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml - !!binary | M2M3M2FiOTBjNjFlM2VhNTc5ODhiNjBkYzBjNmQ0NTc3MDA5MTlhYg== version: 4.0.2 - files: - - releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml - !!binary | NjgyM2YzOTM5OTMxM2ZiZWZhZGMyODYwOTU2YzUyMjY4ZjdmMmU3Nw== - - releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml - !!binary | OWE0MTBkMWI0MjA2YjVmZWNmZjgxMTMwYjEwZmRiNTQyOWUwNGJhMw== - - releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml - !!binary | NzQ2NDVlMjczZmQyNTZhYTk1MTg4YjRiYmNiN2U5NmU1NDM1YmJhNw== version: 2023.2-eol - files: - - releasenotes/notes/adjust-and-deprecate-neutron-config-options-50edf01318758917.yaml - !!binary | MjIwZDhjODU4MWMwNzgzOWI5YWZhODdmNGZmZWU5MWE1NDc1OTNkNQ== - - releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml - !!binary | ZWJmYmQ4NDhiMWQ1N2E0NDVlNGEwMzM2NDExNDkxYWZlMjBhZmE4Zg== - - releasenotes/notes/session-persistence-b409428a8907f542.yaml - !!binary | MzgyZGRiMDMyOWY5Mzg3M2UyNWE1NWJlNjViZjQzMDAwMzMyYTIxYQ== version: 5.0.0 - files: - - releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml - !!binary | OGNhYmZjNWI3Y2Y2MzBkNzIxNjJjMDk4MGY3NzcwOGE2NTA4NmVhNw== version: 6.0.0-15 - files: - - releasenotes/notes/maintenance-task-framework-a366242c89f5af11.yaml - !!binary | MTY2MWYzODE1YzE1MThhYTJlM2YxNzYxZTMzZWM0OTRiZTMxZDU4NA== - - releasenotes/notes/new-device-owner-627ee59b95b670b4.yaml - !!binary | ZTJkYmM1OWJlNTE1OTdhZGJjNTQ4MzhmYzUzNjdhNDliMTlhZDVhMg== version: 6.0.0 - files: - - releasenotes/notes/maintenance-task-bugfix-2072754-cc20f614de657747.yaml - !!binary | YWUxNTQwYmIxYTA0NDY0YzcwNjVlNTQyZWM1ZTk4MTk0NzI0N2YzYg== version: 7.0.0 - files: - - releasenotes/notes/add-neutron-client-interface-info-6a018cad49b5d240.yaml - !!binary | MDQwMWNjYjBmNTk1MDUyNThjM2Y0NDc3OTEwYzljMGM3MGNkZTYyMA== version: 8.0.0-4 - files: - - releasenotes/notes/remove-py38-4240ec2f24969054.yaml - !!binary | ZWRjMDFjYTY3MmMyZDhkZDdmZDA0YjA0ZmY1Yzc2NDNkZjllN2VjYg== version: 8.0.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/session-persistence-b409428a8907f542.yaml0000664000175100017510000000044715033037524030522 0ustar00mylesmyles--- features: - | Now the OVN Octavia provider uses the affinity_timeout option of OVN Load Balancers to support pools sessions persistence. 
It only supports the SOURCE_IP option type. If not timeout is set, by default 360 seconds is set if the session persistence is enabled. ././@PaxHeader0000000000000000000000000000022000000000000010207 xustar00122 path=ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6ac453f28.yaml 22 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/notes/support-member-create-without-subnetid-0b4e3aa6a0000664000175100017510000000040115033037524032640 0ustar00mylesmyles--- fixes: - | Creating members without specifying a subnet ID is now supported. Since the subnet ID is an optional API argument, if not given the provider driver will now attempt to look it up via the pool ID that is a required argument. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5179846 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/0000775000175100017510000000000015033037526021772 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/2023.1.rst0000664000175100017510000000021015033037524023240 0ustar00mylesmyles=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/2023.2.rst0000664000175100017510000000020215033037524023242 0ustar00mylesmyles=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/2024.1.rst0000664000175100017510000000020215033037524023242 0ustar00mylesmyles=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/2024.2.rst0000664000175100017510000000020215033037524023243 0ustar00mylesmyles=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/2025.1.rst0000664000175100017510000000020215033037524023243 0ustar00mylesmyles=========================== 2025.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2025.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/README.rst0000664000175100017510000000270015033037524023456 0ustar00mylesmyles======================================== OVN Octavia Provider Release Notes Howto ======================================== Release notes are a new feature for documenting new features in OpenStack projects. Background on the process, tooling, and methodology is documented in a `mailing list post by Doug Hellmann `_. Writing release notes --------------------- For information on how to create release notes, please consult the `reno documentation `__. Please keep the following in your mind when you write release notes. * **Avoid using "prelude" section** for individual release notes. 
"prelude" section is for general comments about the release. * **Use one entry per section** (like "feature" or "upgrade"). All entries which belong to a same release will be merged and rendered, so there is less meaning to use multiple entries by a single topic. Maintaining release notes ------------------------- .. warning:: Avoid modifying an existing release note file even though it is related to your change. If you modify a release note file of a past release, the whole content will be shown in a latest release. The only allowed case is to update a release note in a same release. If you need to update a release note of a past release, edit a corresponding release note file in a stable branch directly. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/_static/0000775000175100017510000000000015033037526023420 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/_static/.placeholder0000664000175100017510000000000015033037524025667 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/_templates/0000775000175100017510000000000015033037526024127 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/_templates/.placeholder0000664000175100017510000000000015033037524026376 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/conf.py0000664000175100017510000002066515033037524023300 0ustar00mylesmyles# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # OVN Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/ovn-octavia-provider' openstackdocs_bug_project = 'neutron' openstackdocs_bug_tag = 'ovn-octavia-provider' openstackdocs_auto_name = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2020, Neutron Developers' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'OVNOctaviaProviderReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'OVNOctaviaProviderReleaseNotes.tex', 'OVN Octavia Provider Release Notes Documentation', 'Neutron Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ovnoctaviaproviderreleasenotes', 'OVN Octavia Provider Release Notes Documentation', ['Neutron Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'OVNOctaviaProviderReleaseNotes', 'OVN Octavia Provider Release Notes Documentation', 'Neutron Developers', 'OVNOctaviaProviderReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/index.rst0000664000175100017510000000046015033037524023631 0ustar00mylesmyles=================================== OVN Octavia Provider Release Notes =================================== .. toctree:: :maxdepth: 1 unreleased 2025.1 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri .. toctree:: :maxdepth: 1 README.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/unreleased.rst0000664000175100017510000000015615033037524024653 0ustar00mylesmyles============================= Current Series Release Notes ============================= .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/ussuri.rst0000664000175100017510000000020215033037524024046 0ustar00mylesmyles=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/victoria.rst0000664000175100017510000000022015033037524024334 0ustar00mylesmyles============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/wallaby.rst0000664000175100017510000000021415033037524024152 0ustar00mylesmyles============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/xena.rst0000664000175100017510000000020015033037524023445 0ustar00mylesmyles========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/yoga.rst0000664000175100017510000000020015033037524023451 0ustar00mylesmyles========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/releasenotes/source/zed.rst0000664000175100017510000000017415033037524023306 0ustar00mylesmyles======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/requirements.txt0000664000175100017510000000157615033037524021274 0ustar00mylesmyles# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. 
# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. keystoneauth1>=3.14.0 # Apache-2.0 netaddr>=0.7.18 # BSD neutron-lib>=3.8.0 # Apache-2.0 openstacksdk>=0.103.0 # Apache-2.0 oslo.config>=8.0.0 # Apache-2.0 oslo.log>=4.3.0 # Apache-2.0 oslo.messaging>=12.4.0 # Apache-2.0 oslo.serialization>=2.28.1 # Apache-2.0 oslo.utils>=4.5.0 # Apache-2.0 ovs>=2.10.0 # Apache-2.0 ovsdbapp>=2.1.0 # Apache-2.0 pbr>=4.0.0 # Apache-2.0 SQLAlchemy>=1.4.23 # MIT tenacity>=6.0.0 # Apache-2.0 octavia-lib>=2.2.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5119846 ovn_octavia_provider-8.1.0.dev15/roles/0000775000175100017510000000000015033037526017125 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/configure_functional_tests/0000775000175100017510000000000015033037526024552 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/configure_functional_tests/README.rst0000664000175100017510000000076315033037524026245 0ustar00mylesmylesConfigure the host to run Neutron functional/fullstack tests on it **Role Variables** .. zuul:rolevar:: tests_venv :default: {{ tox_envlist }} .. zuul:rolevar:: project_name :default: ovn-octavia-provider .. zuul:rolevar:: base_dir :default: {{ ansible_user_dir }}/src/opendev.org .. zuul:rolevar:: gate_dest_dir :default: {{ base_dir }}/openstack .. zuul:rolevar:: devstack_dir :default: {{ base_dir }}/openstack/devstack .. zuul:rolevar:: neutron_dir :default: {{ gate_dest_dir }}/neutron ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/configure_functional_tests/defaults/0000775000175100017510000000000015033037526026361 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/configure_functional_tests/defaults/main.yaml0000664000175100017510000000050415033037524030166 0ustar00mylesmylestests_venv: "{{ tox_envlist }}" project_name: "ovn-octavia-provider" base_dir: "{{ ansible_user_dir }}/src/opendev.org" gate_dest_dir: "{{ base_dir }}/openstack" devstack_dir: "{{ base_dir }}/openstack/devstack" neutron_dir: "{{ gate_dest_dir }}/neutron" ovn_octavia_provider_dir: "{{ gate_dest_dir }}/ovn-octavia-provider" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/configure_functional_tests/tasks/0000775000175100017510000000000015033037526025677 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/configure_functional_tests/tasks/main.yaml0000664000175100017510000000160615033037524027510 0ustar00mylesmyles- shell: cmd: | set -e set -x GATE_STACK_USER={{ ansible_user }} IS_GATE=True BASE_DIR={{ base_dir }} GATE_DEST={{ gate_dest_dir }} PROJECT_NAME={{ project_name }} NEUTRON_DIR={{ neutron_dir }} DEVSTACK_PATH={{ devstack_dir }} TOP_DIR={{ devstack_dir }} VENV={{ tests_venv }} STACK_USER=stack OVS_BRANCH={{ OVS_BRANCH }} OVN_BRANCH={{ OVN_BRANCH }} Q_BUILD_OVS_FROM_GIT={{ Q_BUILD_OVS_FROM_GIT }} # This is the DB user used in e.g.
the pgsql db DATABASE_USER=openstack_citest source $DEVSTACK_PATH/functions source $DEVSTACK_PATH/lib/neutron_plugins/ovs_source source $DEVSTACK_PATH/lib/neutron_plugins/ovn_agent source $NEUTRON_DIR/tools/configure_for_func_testing.sh configure_host_for_func_testing executable: /bin/bash ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/fetch_journal_log/0000775000175100017510000000000015033037526022611 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/fetch_journal_log/README.rst0000664000175100017510000000063515033037524024302 0ustar00mylesmylesCollect the journal log from a test run. By default, the log is stored in the file given by "journal_log_file_name" **Role Variables** .. zuul:rolevar:: journal_log_path :default: {{ ansible_user_dir }}/logs Path where the journal log file will be stored on the job's node. .. zuul:rolevar:: journal_log_file_name :default: {{ journal_log_path }}/journal.log Name of the journal log file. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/fetch_journal_log/defaults/0000775000175100017510000000000015033037526024420 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/fetch_journal_log/defaults/main.yaml0000664000175100017510000000015415033037524026226 0ustar00mylesmylesjournal_log_path: "{{ ansible_user_dir }}/logs" journal_log_file_name: "{{ journal_log_path }}/journal.log" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/fetch_journal_log/tasks/0000775000175100017510000000000015033037526023736 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/fetch_journal_log/tasks/main.yaml0000664000175100017510000000102515033037524025542 0ustar00mylesmyles- name: Ensure {{ journal_log_path }} exists become: yes file: path: "{{ journal_log_path }}" state: directory owner: "{{ ansible_user }}" group: "{{ ansible_user }}" mode: 0775 - name: Store journal logs in {{ journal_log_file_name }} become: yes shell: cmd: | /bin/journalctl -a > {{ journal_log_file_name }} - name: Set journal.log file permissions become: yes file: path: '{{ journal_log_file_name }}' owner: '{{ ansible_user }}' group: '{{ ansible_user }}' mode: 0644 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/setup_logdir/0000775000175100017510000000000015033037526021625 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/setup_logdir/README.rst0000664000175100017510000000027015033037524023311 0ustar00mylesmylesConfigure the logs dir to be accessible for the ``stack`` user. **Role Variables** .. zuul:rolevar:: logdir :default: /opt/stack/logs Name of the directory where logs will be stored.
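The log-collection roles above are meant to be called from the job playbooks referenced in ``zuul.d/base.yaml`` (for example ``playbooks/post_functional_job.yaml``). As a rough illustration only, a minimal post-run playbook wiring them together might look like the sketch below; the playbook itself, the ``hosts: all`` target and the role ordering are assumptions made for this example, not files shipped in this repository::

    # Hypothetical post-run playbook, not part of this repository.
    - hosts: all
      roles:
        # Create the logs directory (default: /opt/stack/logs) owned by the
        # "stack" user, with group access for the CI user (mode 0775).
        - setup_logdir
        # Dump "journalctl -a" output into {{ journal_log_file_name }}
        # (default: {{ journal_log_path }}/journal.log).
        - fetch_journal_log

The playbooks actually used by the functional jobs are the ones referenced from ``zuul.d/base.yaml`` and may differ from this sketch.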
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/setup_logdir/defaults/0000775000175100017510000000000015033037526023434 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/setup_logdir/defaults/main.yaml0000664000175100017510000000003015033037524025233 0ustar00mylesmyleslogdir: /opt/stack/logs ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/roles/setup_logdir/tasks/0000775000175100017510000000000015033037526022752 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/roles/setup_logdir/tasks/main.yaml0000664000175100017510000000024115033037524024555 0ustar00mylesmyles- name: Ensure logdir exists become: yes file: path: "{{ logdir }}" state: directory owner: stack group: "{{ ansible_user }}" mode: 0775 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5199845 ovn_octavia_provider-8.1.0.dev15/setup.cfg0000664000175100017510000000232315033037526017622 0ustar00mylesmyles[metadata] name = ovn-octavia-provider summary = OpenStack Octavia integration with OVN description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/ovn-octavia-provider/latest/ python_requires = >=3.9 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 [files] packages = ovn_octavia_provider [global] setup_hooks = pbr.hooks.setup_hook [entry_points] octavia.api.drivers = ovn = ovn_octavia_provider.driver:OvnProviderDriver console_scripts = octavia-ovn-db-sync-util = ovn_octavia_provider.cmd.octavia_ovn_db_sync_util:main octavia.driver_agent.provider_agents = ovn = ovn_octavia_provider.agent:OvnProviderAgent oslo.config.opts = octavia.api.drivers.ovn = ovn_octavia_provider.common.config:list_opts [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/setup.py0000664000175100017510000000200615033037524017507 0ustar00mylesmyles# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. 
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/test-requirements.txt0000664000175100017510000000116515033037524022243 0ustar00mylesmyles# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking>=6.1.0,<6.2.0 # Apache-2.0 bandit!=1.6.0,>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 flake8-import-order>=0.18.0,<0.19.0 # LGPLv3 python-subunit>=1.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 pylint>=2.6.0 # GPLv2 testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD WebTest>=2.0.27 # MIT testtools>=2.2.0 # MIT neutron>=23.0.0.0b3 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/tools/0000775000175100017510000000000015033037526017141 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/tools/check_unit_test_structure.sh0000775000175100017510000000331115033037524024767 0ustar00mylesmyles#!/usr/bin/env bash # This script identifies the unit test modules that do not correspond # directly with a module in the code tree. See TESTING.rst for the # intended structure. repo_path=$(cd "$(dirname "$0")/.." && pwd) base_test_path=ovn_octavia_provider/tests/unit test_path=$repo_path/$base_test_path test_files=$(find ${test_path} -iname 'test_*.py') ignore_regexes=( # Exceptional cases that should be skipped can be added here # EXAMPLE: "^objects/test_objects.py$" ) error_count=0 ignore_count=0 total_count=0 for test_file in ${test_files[@]}; do relative_path=${test_file#$test_path/} expected_path=$(dirname $repo_path/ovn_octavia_provider/$relative_path) test_filename=$(basename "$test_file") expected_filename=${test_filename#test_} # Module filename (e.g. foo/bar.py -> foo/test_bar.py) filename=$expected_path/$expected_filename # Package dir (e.g. foo/ -> test_foo.py) package_dir=${filename%.py} if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then for ignore_regex in ${ignore_regexes[@]}; do if [[ "$relative_path" =~ $ignore_regex ]]; then ignore_count=$((ignore_count + 1)) continue 2 fi done echo "Unexpected test file: $base_test_path/$relative_path" error_count=$((error_count + 1)) fi total_count=$((total_count + 1)) done if [ "$ignore_count" -ne 0 ]; then echo "$ignore_count unmatched test modules were ignored" fi if [ "$error_count" -eq 0 ]; then echo 'Success! All test modules match targets in the code tree.' exit 0 else echo "Failure! $error_count of $total_count test modules do not match targets in the code tree." exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/tools/coding-checks.sh0000775000175100017510000000306315033037524022201 0ustar00mylesmyles#!/bin/sh # This script is copied from neutron and adapted for networking-ovn. set -eu usage () { echo "Usage: $0 [OPTION]..." echo "Run ovn_octavia_provider's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire ovn_octavia_provider module or just files changed in basecommit (e.g. 
HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } join_args() { if [ -z "$scriptargs" ]; then scriptargs="$opt" else scriptargs="$scriptargs $opt" fi } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) join_args;; esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="ovn_octavia_provider" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/tools/generate_config_file_samples.sh0000775000175100017510000000143015033037524025336 0ustar00mylesmyles#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/tools/pip_install_src_modules.sh0000775000175100017510000000122215033037524024410 0ustar00mylesmyles#!/bin/bash # For networking-ovn unit tests, you can define git repos containing modules # that you want to use to override the requirements-based packages. # # Why, you ask? Because you made changes to neutron-lib, and you want # run the unit tests together. E.g.: # # env TOX_ENV_SRC_MODULES="$HOME/src/neutron-lib" tox -e py37 toxinidir="$1" if [ -z "$TOX_ENV_SRC_MODULES" ]; then exit 0 fi for repo in $TOX_ENV_SRC_MODULES; do d="${toxinidir}/${repo}" if [ ! 
-d "$d" ]; then echo "tox_env_src: error: no directory found at $d" continue fi echo "tox_env_src: pip installing from $d" pip install -e "$d" done ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/tox.ini0000664000175100017510000001245515033037524017321 0ustar00mylesmyles[tox] minversion = 3.18.0 envlist = docs,py3,pep8 skipsdist = False ignore_basepython_conflict = True [testenv] basepython = python3 usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt allowlist_externals = bash {toxinidir}/tools/pip_install_src_modules.sh passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY TOX_ENV_SRC_MODULES commands = {toxinidir}/tools/pip_install_src_modules.sh "{toxinidir}" stestr run {posargs} [testenv:pep8] commands = flake8 {toxinidir}/tools/check_unit_test_structure.sh {toxinidir}/tools/coding-checks.sh --pylint '{posargs}' {[testenv:bandit]commands} {[testenv:genconfig]commands} allowlist_externals = {toxinidir}/tools/check_unit_test_structure.sh {toxinidir}/tools/coding-checks.sh {toxinidir}/tools/generate_config_file_samples.sh [testenv:venv] commands = {posargs} [testenv:functional] setenv = {[testenv]setenv} OS_TEST_PATH=./ovn_octavia_provider/tests/functional OS_TEST_TIMEOUT=240 deps = {[testenv]deps} -r{toxinidir}/ovn_octavia_provider/tests/functional/requirements.txt [testenv:dsvm] # Fake job to define environment variables shared between dsvm jobs setenv = OS_TEST_TIMEOUT=240 OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} commands = false [testenv:dsvm-functional] setenv = {[testenv:functional]setenv} {[testenv:dsvm]setenv} deps = {[testenv:functional]deps} commands = stestr run --isolated {posargs} [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --source ovn_octavia_provider --parallel-mode commands = stestr run --no-subunit-trace {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report --fail-under=92 --skip-covered [testenv:docs] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt commands = sphinx-build -W -b html doc/source doc/build/html [testenv:pdf-docs] deps = {[testenv:docs]deps} allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:debug] commands = oslo_debug_helper -t ovn_octavia_provider/tests {posargs} [testenv:releasenotes] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [flake8] # I202 Additional newline in a group of imports # W504 line break after binary operator ignore = W504,I202 # H106: Don't put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H204: Use assert(Not)Equal to check for equality # H205: Use assert(Greater|Less)(Equal) for comparison # H904: Delay string interpolations at logging calls enable-extensions=H106,H203,H204,H205,H904 show-source = True exclude=./.*,dist,doc,*egg*,build,releasenotes 
import-order-style = pep8 [hacking] import_exceptions = ovn_octavia_provider.i18n [flake8:local-plugins] extension = N322 = checks:check_assert_called_once_with N328 = checks:check_asserttruefalse N330 = checks:check_assertempty N331 = checks:check_assertisinstance N332 = checks:check_assertequal_for_httpcode N343 = checks:check_no_imports_from_tests N344 = checks:check_python3_no_filter N347 = checks:check_no_import_mock N348 = checks:check_assertcountequal paths =./ovn_octavia_provider/hacking [testenv:genconfig] commands = {toxinidir}/tools/generate_config_file_samples.sh allowlist_externals = {toxinidir}/tools/generate_config_file_samples.sh # This environment can be used to quickly validate that all needed system # packages required to successfully execute test targets are installed [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. deps = bindep commands = bindep test [testenv:requirements] deps = -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements allowlist_externals = bash commands = bash -c '{envdir}/src/openstack-requirements/playbooks/files/project-requirements-change.py --req {envdir}/src/openstack-requirements --local {toxinidir} master' [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -r ovn_octavia_provider -x tests -n5 [testenv:dev] # run locally (not in the gate) using editable mode # https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs commands = pip install -q -e "git+https://git.openstack.org/openstack/neutron#egg=neutron" {[testenv]commands} [testenv:pep8-dev] deps = {[testenv]deps} commands = {[testenv:dev]commands} {[testenv:pep8]commands} ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924565.5189846 ovn_octavia_provider-8.1.0.dev15/zuul.d/0000775000175100017510000000000015033037526017222 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/zuul.d/base.yaml0000664000175100017510000001522215033037524021020 0ustar00mylesmyles- job: name: ovn-octavia-provider-functional-base parent: devstack-minimal description: Run OVN Octavia provider functional tests timeout: 7800 required-projects: - opendev.org/openstack/devstack - openstack/neutron - openstack/requirements roles: - zuul: openstack/devstack pre-run: playbooks/configure_functional_job.yaml run: playbooks/run_functional_job.yaml post-run: playbooks/post_functional_job.yaml irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ vars: devstack_services: # Ignore any default set by devstack. Emit a "disable_all_services". 
base: false devstack_localrc: INSTALL_TESTONLY_PACKAGES: true DATABASE_PASSWORD: stackdb tox_envlist: dsvm-functional tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/openstack/requirements/upper-constraints.txt' zuul_copy_output: # We need to copy directory with logs to have it in job artifacts also, # /opt/stack/logs is default logs directory defined in neutron's # tox.ini file '{{ devstack_base_dir }}/logs/dsvm-functional-logs': logs - job: name: ovn-octavia-provider-functional-release parent: ovn-octavia-provider-functional-base description: Run OVN Octavia provider functional tests vars: OVN_BUILD_FROM_SOURCE: True Q_BUILD_OVS_FROM_GIT: True INSTALL_OVN: True OVN_BRANCH: branch-24.03 OVS_BRANCH: branch-3.3 - job: name: ovn-octavia-provider-functional-master parent: ovn-octavia-provider-functional-base description: Run OVN Octavia provider functional tests - OVN master vars: OVN_BUILD_FROM_SOURCE: True Q_BUILD_OVS_FROM_GIT: True INSTALL_OVN: True OVN_BRANCH: main # NOTE(froyo): OVN main branch following OVS stable branch OVS_BRANCH: branch-3.4 - job: name: ovn-octavia-provider-tempest-base parent: devstack-tempest abstract: true timeout: 7800 required-projects: - openstack/neutron - openstack/octavia - openstack/octavia-lib - openstack/octavia-tempest-plugin - openstack/python-octaviaclient - openstack/ovn-octavia-provider irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^etc/.*$ - ^releasenotes/.*$ vars: devstack_localrc: Q_AGENT: ovn OVN_AGENT_EXTENSIONS: 'metadata' # NOTE(ralonsoh): during the eventlet removal, the "logger" mech # driver has been removed from this list. Re-add it once the removal # is finished or the mech driver does not call monkey_patch(). Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve Q_ML2_TENANT_NETWORK_TYPE: geneve USE_PYTHON3: True TEMPEST_PLUGINS: '/opt/stack/octavia-tempest-plugin' OCTAVIA_NODE: api OCTAVIA_TEMPEST_PLUGIN_CUSTOMIZE_IMAGE: true DISABLE_AMP_IMAGE_BUILD: true OVN_L3_CREATE_PUBLIC_NETWORK: true Q_USE_PROVIDERNET_FOR_PUBLIC: true PHYSICAL_NETWORK: public ENABLE_CHASSIS_AS_GW: true OVN_DBS_LOG_LEVEL: dbg devstack_local_conf: post-config: $OCTAVIA_CONF: api_settings: enabled_provider_drivers: 'ovn:OVN provider driver' default_provider_driver: 'ovn' /etc/octavia/octavia-uwsgi.ini: uwsgi: processes: 1 test-config: "$TEMPEST_CONFIG": load_balancer: provider: 'ovn' enable_security_groups: True enabled_provider_drivers: 'ovn:OVN provider driver' test_sctp_protocol: True loadbalancer-feature-enabled: health_monitor_enabled: True pool_algorithms_enabled: False l7_protocol_enabled: False l4_protocol: "TCP" session_persistence_enabled: False not_implemented_is_error: False devstack_services: c-bak: false c-sch: false c-api: false c-vol: false cinder: false q-svc: true q-dns: true q-dhcp: false q-agt: false q-meta: false q-l3: false ovn-northd: true ovn-controller: true q-ovn-metadata-agent: false q-ovn-agent: true octavia: true o-api: true o-da: true o-hk: true o-cw: false o-hm: false swift: false s-account: false s-container: false s-object: false s-proxy: false tempest: true etcd: false devstack_plugins: neutron: https://opendev.org/openstack/neutron.git octavia: https://opendev.org/openstack/octavia.git octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin.git ovn-octavia-provider: https://opendev.org/openstack/ovn-octavia-provider tempest_test_regex: "^octavia_tempest_plugin.tests.(api|scenario).v2" tempest_exclude_regex: "\ 
(^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_http_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_tcp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_udp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_sctp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_source_ip_port_tcp_traffic)|\ (^octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_source_ip_port_udp_traffic)" zuul_copy_output: '{{ devstack_base_dir }}/data/ovs': 'logs' '{{ devstack_base_dir }}/data/ovn': 'logs' '{{ devstack_log_dir }}/ovsdb-server-nb.log': 'logs' '{{ devstack_log_dir }}/ovsdb-server-sb.log': 'logs' tempest_concurrency: 2 tox_envlist: all - job: name: ovn-octavia-provider-tempest-release parent: ovn-octavia-provider-tempest-base vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True INSTALL_OVN: True OVN_BRANCH: branch-24.03 OVS_BRANCH: branch-3.3 - job: name: ovn-octavia-provider-tempest-master parent: ovn-octavia-provider-tempest-base vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True INSTALL_OVN: True OVN_BRANCH: main # NOTE(froyo): OVN main branch following OVS stable branch OVS_BRANCH: branch-3.4 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924564.0 ovn_octavia_provider-8.1.0.dev15/zuul.d/project.yaml0000664000175100017510000000214015033037524021547 0ustar00mylesmyles- project: templates: - publish-openstack-docs-pti - release-notes-jobs-python3 - check-requirements - openstack-cover-jobs - openstack-python3-jobs-neutron check: jobs: - openstack-tox-cover: required-projects: - openstack/neutron - ovn-octavia-provider-functional-release - ovn-octavia-provider-functional-master - ovn-octavia-provider-tempest-release - ovn-octavia-provider-tempest-master: voting: false - kuryr-kubernetes-tempest-ovn-provider-ovn: voting: false - devstack-tobiko-octavia: voting: true vars: pytest_addopts_global: "--skipregex=OctaviaBasicTrafficScenarioTest" gate: fail-fast: true jobs: - ovn-octavia-provider-functional-release - ovn-octavia-provider-functional-master - ovn-octavia-provider-tempest-release - devstack-tobiko-octavia: voting: true vars: pytest_addopts_global: "--skipregex=OctaviaBasicTrafficScenarioTest"