././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/0000775000175000017500000000000015121004510012505 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/.converagerc0000664000175000017500000000014115121004454015007 0ustar00zuulzuul[run] branch = True source = oslo_limit omit = oslo_limit/tests/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/.coveragerc0000664000175000017500000000015115121004454014632 0ustar00zuulzuul[run] branch = True source = limit omit = limit/tests/*,limit/openstack/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/.mailmap0000664000175000017500000000013115121004454014130 0ustar00zuulzuul# Format is: # # ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/.pre-commit-config.yaml0000664000175000017500000000130515121004454016774 0ustar00zuulzuulrepos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' - id: fix-byte-order-marker - id: check-executables-have-shebangs - id: check-merge-conflict - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.14.8 hooks: - id: ruff-check args: ['--fix', '--unsafe-fixes'] - id: ruff-format - repo: https://opendev.org/openstack/hacking rev: 8.0.0 hooks: - id: hacking additional_dependencies: [] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/.stestr.conf0000664000175000017500000000006215121004454014763 0ustar00zuulzuul[DEFAULT] test_path=./oslo_limit/tests top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/.zuul.yaml0000664000175000017500000000247415121004454014464 0ustar00zuulzuul- job: name: oslo-limit-functional-devstack parent: devstack description: This job sets up a minimal devstack deployment, configures various usage and limit scenarios, and verifies usage enforcement based on existing usage. timeout: 9000 vars: devstack_localrc: DATABASE_PASSWORD: secretdatabase ADMIN_PASSWORD: secretadmin LOGFILE: /opt/stack/logs/devstacklog.txt LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true devstack_services: # Ignore all services by setting "disable_all_service". We do this # because we only really need keystone running to store unified limits. # These functional tests are not testing enforcement between keystone and # other services like nova and cinder. After this, we should only enable # exactly what we need to run keystone. 
base: false mysql: true key: true - project: templates: - check-requirements - openstack-cover-jobs - lib-forward-testing-python3 - openstack-python3-jobs - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 check: jobs: - oslo-limit-functional-devstack gate: jobs: - oslo-limit-functional-devstack ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/AUTHORS0000664000175000017500000000267515121004507013575 0ustar00zuulzuulAndreas Jaeger Ben Nemec Colleen Murphy Colleen Murphy Corey Bryant Dan Smith Daniel Bengtsson Doug Hellmann Ghanshyam Ghanshyam Mann Hervé Beraud John Garbutt John Garbutt Lance Bragstad Moisés Guimarães de Medeiros OpenStack Release Bot Piotr Korthals Sean McGinnis Stephen Finucane Takashi Kajinami Takashi Kajinami Victor Coutellier Vieri <15050873171@163.com> Vishakha Agarwal ZhijunWei ZhongShengping caoyuan gujin huang.zhiping jacky06 likui maaoyu melanie witt melissaml wangxiyuan wangzihao wu.shiming xuanyandong zhangzs zhoulinhui ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/CONTRIBUTING.rst0000664000175000017500000000152415121004454015157 0ustar00zuulzuulIf you would like to contribute to the development of oslo's libraries, first you must take a look to this page: https://specs.openstack.org/openstack/oslo-specs/specs/policy/contributing.html If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/oslo.limit ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/ChangeLog0000664000175000017500000001322215121004507014265 0ustar00zuulzuulCHANGES ======= 2.9.2 ----- * Enable logging related ruff checks * typing: Be looser in what we accept * typing: Accept None project ID * Run mypy from tox * Delay string interpolations at logging calls * Fix region query 2.9.1 ----- * Handle missing endpoint, region, service * fixture: Mock out services, regions * Remove reference to tag framework 2.9.0 ----- * Add typing * ruff: Enable more rules * Accept previously accepted format for endpoint\_service\_type * Apply ruff * reno: Update master for unmaintained/2024.1 * Bump pyupgrade target to 3.10+ * Migrate bandit options to pyproject.toml * Fix endpoint query * Add repository URL * pre-commit: Bump dependencies * Migrate setup configuration to pyproject.toml * Drop Python 3.9 support * Update master for stable/2025.2 2.8.0 ----- * add pyproject.toml to support pip 23.1 2.7.0 ----- * Fix ignored KSA adapter options * tox: Remove basepython * Added ability to select identity interface * Update master for stable/2025.1 * Call Keystone API once to get (registered\_)limits 2.6.1 ----- * Skip installation to speed up pep8 * reno: Update master for unmaintained/2023.1 2.6.0 ----- * Add note about requirements lower bounds * Bump min openstacksdk to use name filter for service * Run pyupgrade to clean up Python 2 syntaxes * pre-commit: Bump versions * Declare Python 3.12 support * Update master for stable/2024.2 * Query endpoint id from keystone 2.5.0 ----- * reno: Update master for unmaintained/zed * Remove old excludes * Update master for stable/2024.1 * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria 2.4.0 ----- * reno: Update master for unmaintained/yoga * Bump hacking * Update python classifier in setup.cfg 2.3.0 ----- * Add openstack-cover-jobs to .zuul.yaml * Fix up tox -e cover to use coverage * Ensure endpoint\_id is set * Update master for stable/2023.2 2.2.0 ----- * Bump bandit * Revert "Moves supported python runtimes from version 3.8 to 3.10" * Moves supported python runtimes from version 3.8 to 3.10 * Update master for stable/2023.1 2.1.0 ----- * Add Python3 antelope unit tests * Update master for stable/zed 2.0.1 ----- * Fix formatting of release list 2.0.0 ----- * Drop python3.6/3.7 support in testing runtime 1.6.0 ----- * Move Enforcer caching closer to limit retrieval * Add Python3 zed unit tests * Update master for stable/yoga * Add documentation for config options * oslo-config-generator: Fix ValueError 1.5.0 ----- * Add interfaces for getting limits without enforcing * Allow project\_id=None for enforce/calculate * Make calculate\_usage() work if limits are missing * Add caching of limits in Enforcer * Add auth plugin options to options list * Add Python3 yoga unit tests * Update master for stable/xena * setup.cfg: Replace dashes with underscores * Changed minversion in tox to 3.18.0 1.4.0 ----- * Add a test fixture * Add Enforcer.calculate\_usage() * Upgrade the pre-commit-hooks version * Add Python3 xena unit tests * Update master for stable/wallaby * Fix requirements issues * Use TOX\_CONSTRAINTS\_FILE * remove unicode from code * add py38 matedata 1.3.0 ----- * Use py3 as the default runtime for tox * Fix hacking min version to 3.0.1 * Remove install unnecessary 
packages * Add Python3 wallaby unit tests * Update master for stable/victoria * Adding pre-commit 1.2.1 ----- * Bump bandit version 1.2.0 ----- 1.1.0 ----- * Add debug tox environment * Add user guide about how to add a new service * Switch to newer openstackdocstheme and reno versions * Remove the unused coding style modules * Remove translation sections from setup.cfg * Align contributing doc with oslo's policy * Add release notes links to doc index * Add Python3 victoria unit tests * Update master for stable/ussuri 1.0.2 ----- * Update to hacking 3.0 * Update hacking for Python3 * Use unittest.mock instead of third party mock * Update the minversion parameter * ignore reno generated artifacts * drop use of six 1.0.1 ----- * oslo.limit mistakenly released as 1.0.0 (release note) * remove outdated header 1.0.0 ----- * [ussuri][goal] Drop python 2.7 support and testing * Trivial cleanup for tox 0.3.0 ----- * Add devstack job to .zuul.conf * Add flat enforcer * Fetch unified limits from keystone * Pick between Flat and StrictTwoLevel enforcement * Add ksa connection logic * tox: Keeping going with docs 0.2.0 ----- * Update master for stable/train 0.1.1 ----- * Add Python 3 Train unit tests 0.1.0 ----- * Rename filter\_resource resource\_filters * Add skeleton enforce() method to Enforcer * Remove \_\_enter\_\_ and \_\_exit\_\_ methods from Enforcer * Remove verification functionality * Remove ProjectClaim object from oslo.limit * Sync Sphinx requirement * Replace git.openstack.org URLs with opendev.org URLs * OpenDev Migration Patch * Update the min version of tox * Drop py35 jobs * Add py36 and py37 tox envs * Add python3.7 job on Stein+ * add python 3.7 unit test job * Update hacking version * fix the url in doc * Use template for lower-constraints * Update mailinglist from dev to discuss * Clean up .gitignore references to personal tools * Fix doc grammar/spelling nits * Use openstackdocstheme for documentation * Add a conceptual overview to docs * Render API reference documentation * Ignore documentation builds * Add opts file * add lib-forward-testing-python3 test job * add python 3.6 unit test job * import zuul job settings from project-config * fix doc gate * ADD i18n file * Fix CI * Add .zuul.yaml * Implement basic Enforcer context manager * Implement ProjectClaim objects * Convert tox.ini to using stestr * Update url in HACKING.rst * fix tox python3 overrides * Init repo * Added .gitreview ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/HACKING.rst0000664000175000017500000000021615121004454014311 0ustar00zuulzuuloslo.limit Style Commandments ============================= Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/LICENSE0000664000175000017500000002363715121004454013534 0ustar00zuulzuul Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/PKG-INFO0000644000175000017500000000403115121004510013576 0ustar00zuulzuulMetadata-Version: 2.4 Name: oslo.limit Version: 2.9.2 Summary: Limit enforcement library to assist with quota calculation. Author-email: OpenStack License: Apache-2.0 Project-URL: Homepage, https://docs.openstack.org/oslo.limit Project-URL: Repository, https://opendev.org/openstack/oslo.limit Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.10 Description-Content-Type: text/x-rst License-File: LICENSE Requires-Dist: keystoneauth1>=3.9.0 Requires-Dist: oslo.config>=5.2.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=3.44.0 Requires-Dist: openstacksdk>=0.47.0 Dynamic: license-file Dynamic: requires-dist ========== oslo.limit ========== .. image:: https://governance.openstack.org/tc/badges/oslo.limit.svg .. Change things from this point on .. image:: https://img.shields.io/pypi/v/oslo.limit.svg :target: https://pypi.python.org/pypi/oslo.limit/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.limit.svg :target: https://pypi.python.org/pypi/oslo.limit/ :alt: Downloads Oslo.limit is the limit enforcement library to assist with quota calculation. It aims to provide support for quota enforcement across all OpenStack services. 
* Free software: Apache license * Documentation: https://docs.openstack.org/oslo.limit/latest/ * Source: http://opendev.org/openstack/oslo.limit * Bugs: http://bugs.launchpad.net/oslo.limit ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/README.rst0000664000175000017500000000136615121004454014211 0ustar00zuulzuul========== oslo.limit ========== .. image:: https://governance.openstack.org/tc/badges/oslo.limit.svg .. Change things from this point on .. image:: https://img.shields.io/pypi/v/oslo.limit.svg :target: https://pypi.python.org/pypi/oslo.limit/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.limit.svg :target: https://pypi.python.org/pypi/oslo.limit/ :alt: Downloads Oslo.limit is the limit enforcement library to assist with quota calculation. It aims to provide support for quota enforcement across all OpenStack services. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.limit/latest/ * Source: http://opendev.org/openstack/oslo.limit * Bugs: http://bugs.launchpad.net/oslo.limit ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6518483 oslo_limit-2.9.2/doc/0000775000175000017500000000000015121004510013252 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/requirements.txt0000664000175000017500000000023015121004454016540 0ustar00zuulzuulfixtures>=3.0.0 # Apache-2.0/BSD openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinx>=2.0.0 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6518483 oslo_limit-2.9.2/doc/source/0000775000175000017500000000000015121004510014552 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/conf.py0000664000175000017500000000321415121004454016060 0ustar00zuulzuul# Copyright (C) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'openstackdocstheme', 'sphinxcontrib.apidoc', 'oslo_config.sphinxext', ] apidoc_module_dir = '../../oslo_limit' apidoc_excluded_paths = ['tests'] apidoc_output_dir = 'reference/api' apidoc_separate_modules = True # The suffix of source filenames. # source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'oslo.limit' copyright = '2018, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' html_theme = 'openstackdocs' ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6518483 oslo_limit-2.9.2/doc/source/contributor/0000775000175000017500000000000015121004510017124 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/contributor/index.rst0000664000175000017500000000012415121004454020771 0ustar00zuulzuul============== Contributing ============== .. include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/index.rst0000664000175000017500000000067015121004454016425 0ustar00zuulzuul============ oslo.limit ============ Limit enforcement library to assist with quota calculation. Contents ======== .. toctree:: :maxdepth: 2 install/index contributor/index reference/index user/index Release Notes ============= Read also the `oslo.limit Release Notes `_. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6518483 oslo_limit-2.9.2/doc/source/install/0000775000175000017500000000000015121004510016220 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/install/index.rst0000664000175000017500000000014115121004454020064 0ustar00zuulzuul============== Installation ============== At the command line:: $ pip install oslo.limit ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6518483 oslo_limit-2.9.2/doc/source/reference/0000775000175000017500000000000015121004510016510 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/reference/index.rst0000664000175000017500000000017415121004454020362 0ustar00zuulzuul========= Reference ========= .. toctree:: :maxdepth: 2 opts API === .. toctree:: :maxdepth: 1 api/modules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/reference/opts.rst0000664000175000017500000000041615121004454020237 0ustar00zuulzuul===================== Configuration Options ===================== oslo.limit uses oslo.config to define and manage configuration options to allow the deployer to control how an application uses the underlying quota limits and enforcement. .. show-options:: oslo.limit ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6528482 oslo_limit-2.9.2/doc/source/user/0000775000175000017500000000000015121004510015530 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/user/index.rst0000664000175000017500000000011515121004454017375 0ustar00zuulzuulUsing oslo.limit ================ .. toctree:: usage.rst testing.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/user/testing.rst0000664000175000017500000000135615121004454017753 0ustar00zuulzuul======= Testing ======= To test a project that uses oslo.limit, a fixture is provided. This mocks out the connection to keystone and retrieval of registered and project limits. 
Example
=======

.. code-block:: python

    from oslo_limit import fixture

    class MyTest(unittest.TestCase):
        def setUp(self):
            super(MyTest, self).setUp()
            # Default limit of 10 widgets
            registered_limits = {'widgets': 10}
            # project2 gets 20 widgets
            project_limits = {'project2': {'widgets': 20}}
            self.useFixture(fixture.LimitFixture(registered_limits,
                                                 project_limits))

        def test_thing(self):
            # ... use limit.Enforcer() as usual

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/doc/source/user/usage.rst0000664000175000017500000001443315121004454017402 0ustar00zuulzuul=======
Usage
=======

To use oslo.limit in a project::

    from oslo_limit import limit

Conceptual Overview
===================

This library is meant to aid service developers performing usage checks for
resources managed by their service. It does this by clearly defining what is
being claimed, where resources are being claimed, and encapsulating
enforcement logic behind easy-to-use utilities. The following subsections
introduce common terms and concepts useful for communicating within the
context of usage enforcement for distributed systems.

Usage
-----

Usage is the real-time allocation of resources belonging to someone or
something. Resources counted as usage are assumed to be present, or created,
and thus accounted for. Because OpenStack is a distributed system, the
service responsible for a resource is considered the usage authority for that
resource. Ensuring accurate usage almost always requires the service to
perform a lookup and possibly aggregate results to definitively provide usage
information.

Limit
-----

A limit is the total number of resources someone or something *should* have.
With respect to OpenStack, the service which owns a particular resource may
also own that resource's limit. Conversely, limit information may be
centralized in a shared service. The latter is the pattern implied by the
usage of this library. The justification for decoupling resource limits from
individual services is to make it easier to provide a consistent experience
for users or operators setting and enforcing limits, regardless of the
resource.

Claim
-----

A claim represents the quantity of resources being asked for by someone.
Claims are constrained by the relationship between resource usage and limits.
Successful claims are aggregated into usage. Within the OpenStack ecosystem,
claims can be made against specific targets depending on the resource. For
instance, a user may request two additional servers for her project. The
resulting claim might be two instances, the total number of cores between
both instances, the total memory consumed by both instances, or all three.
The claim is also targeted to a specific project, which affects how this
library asks for usage information.

Enforcement
-----------

Enforcement is the process of collecting usage data, limit information, and
claims in order to make a decision about whether a user should be able to
obtain more resources.

Adding oslo.limit to a service
==============================

Configuration
-------------

By default, the oslo.limit library looks for an ``[oslo_limit]`` section in
the configuration file of the service. This section must contain standard
authentication information against the Keystone service in order to query the
unified limit APIs.

Be aware that the service account requires at a minimum a reader role
assigned on the system scope for enforcing limits, and the authentication
information **should not** contain project information, as the keystoneauth
library will use it instead of ``system_scope``.

In addition to the authentication information, the ``oslo_limit``
configuration section must contain a way to identify the service in order to
filter limits by it. This can either be a combination of ``service_name``,
``service_type`` and ``region_name``, or simply ``endpoint_id``.

Here is an example of an ``oslo_limit`` configuration:

.. code-block:: ini

    [oslo_limit]
    auth_url = http://controller:5000
    auth_type = password
    user_domain_id = default
    username = MY_SERVICE
    system_scope = reader
    password = MY_PASSWORD
    service_name = my_service
    region_name = RegionOne

Create registered limit
-----------------------

Before enforcing a limit for a given resource, a registered limit **should**
exist for that resource. Registered limits can be, for example, configured
during service deployment.

.. note::

    Your user account must have the admin role assigned on the system scope
    to create registered limits.

Enforce a limit
---------------

Using the enforcer consists mainly of defining a callback function that
returns the current usage of a given project, then calling the ``enforce``
function with the amount of each resource you want to consume for a project,
handling the possible quota exceeded exceptions.

Here is a simple usage of limit enforcement:

.. code-block:: python

    import logging

    from oslo_limit import limit
    from oslo_limit import exception as limit_exceptions

    # Callback function that needs to return the resource usage for each
    # resource asked in resource_names, for a given project_id
    def callback(project_id, resource_names):
        return {x: get_resource_usage_by_project(x, project_id)
                for x in resource_names}

    enforcer = limit.Enforcer(callback)
    try:
        # Check a limit for a given project for a set of resources; resource
        # units are deltas to be consumed
        enforcer.enforce('project_uuid', {'my_resource': 1})
    except limit_exceptions.ProjectOverLimit as e:
        # What to do in case of a limit exception; e contains a list of
        # resources over quota
        logging.error(e)

Check a limit
-------------

Another usage pattern is to check a limit and usage for a given project,
outside the scope of enforcement. This may be useful in a reporting API to
expose to a user the limit and usage information that the enforcer would use
to judge a resource consumption event. Any limit passed to this API which is
not registered in keystone will be considered to be zero, in keeping with the
behavior of the enforcer assuming that "unregistered means no quota."

.. note::

    This should ideally not be used to provide your own enforcement of
    limits, but rather for reporting or planning purposes.

Here is a simple usage of limit reporting:

..
code-block:: python import logging from oslo_limit import limit # Callback function who need to return resource usage for each # resource asked in resources_names, for a given project_id def callback(project_id, resource_names): return {x: get_resource_usage_by_project(x, project_id) for x in resource_names} enforcer = limit.Enforcer(callback) usage = enforcer.calculate_usage('project_uuid', ['my_resource']) logging.info('%s using %i out of %i allowed %s resource' % ( 'project_uuid', usage['my_resource'].usage, usage['my_resource'].limit, 'my_resource')) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/oslo.limit.egg-info/0000775000175000017500000000000015121004510016270 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/PKG-INFO0000644000175000017500000000403115121004507017367 0ustar00zuulzuulMetadata-Version: 2.4 Name: oslo.limit Version: 2.9.2 Summary: Limit enforcement library to assist with quota calculation. Author-email: OpenStack License: Apache-2.0 Project-URL: Homepage, https://docs.openstack.org/oslo.limit Project-URL: Repository, https://opendev.org/openstack/oslo.limit Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.10 Description-Content-Type: text/x-rst License-File: LICENSE Requires-Dist: keystoneauth1>=3.9.0 Requires-Dist: oslo.config>=5.2.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=3.44.0 Requires-Dist: openstacksdk>=0.47.0 Dynamic: license-file Dynamic: requires-dist ========== oslo.limit ========== .. image:: https://governance.openstack.org/tc/badges/oslo.limit.svg .. Change things from this point on .. image:: https://img.shields.io/pypi/v/oslo.limit.svg :target: https://pypi.python.org/pypi/oslo.limit/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.limit.svg :target: https://pypi.python.org/pypi/oslo.limit/ :alt: Downloads Oslo.limit is the limit enforcement library to assist with quota calculation. It aims to provide support for quota enforcement across all OpenStack services. 
* Free software: Apache license * Documentation: https://docs.openstack.org/oslo.limit/latest/ * Source: http://opendev.org/openstack/oslo.limit * Bugs: http://bugs.launchpad.net/oslo.limit ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/SOURCES.txt0000664000175000017500000000403715121004507020166 0ustar00zuulzuul.converagerc .coveragerc .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst pyproject.toml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/reference/index.rst doc/source/reference/opts.rst doc/source/user/index.rst doc/source/user/testing.rst doc/source/user/usage.rst oslo.limit.egg-info/PKG-INFO oslo.limit.egg-info/SOURCES.txt oslo.limit.egg-info/dependency_links.txt oslo.limit.egg-info/entry_points.txt oslo.limit.egg-info/not-zip-safe oslo.limit.egg-info/pbr.json oslo.limit.egg-info/requires.txt oslo.limit.egg-info/top_level.txt oslo_limit/__init__.py oslo_limit/_i18n.py oslo_limit/exception.py oslo_limit/fixture.py oslo_limit/limit.py oslo_limit/opts.py oslo_limit/py.typed oslo_limit/tests/__init__.py oslo_limit/tests/test_fixture.py oslo_limit/tests/test_limit.py releasenotes/notes/bug-1962406-e239d60400c726c8.yaml releasenotes/notes/bug-2123895-d46347955768fab1.yaml releasenotes/notes/drop-python27-support-7c1d29f348060147.yaml releasenotes/notes/enforcer-limit-caching-fb59725aad88b039.yaml releasenotes/notes/fix-ignored-ksa-adapter-options-e120fac9a6fd35f7.yaml releasenotes/notes/look-up-endpoint-id-from-keystone-9d8419673902c258.yaml releasenotes/notes/pre-stable-version-warning-83dbfc9427a22725.yaml releasenotes/notes/remove-py39-19083e939173e39e.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/2025.1.rst releasenotes/source/2025.2.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/dependency_links.txt0000664000175000017500000000000115121004507022344 0ustar00zuulzuul ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/entry_points.txt0000664000175000017500000000007215121004507021573 0ustar00zuulzuul[oslo.config.opts] oslo.limit = oslo_limit.opts:list_opts ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/not-zip-safe0000664000175000017500000000000115121004507020524 0ustar00zuulzuul ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/pbr.json0000664000175000017500000000005615121004507017755 0ustar00zuulzuul{"git_version": "fcc8fca", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 
oslo_limit-2.9.2/oslo.limit.egg-info/requires.txt0000664000175000017500000000014015121004507020671 0ustar00zuulzuulkeystoneauth1>=3.9.0 oslo.config>=5.2.0 oslo.i18n>=3.15.3 oslo.log>=3.44.0 openstacksdk>=0.47.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066503.0 oslo_limit-2.9.2/oslo.limit.egg-info/top_level.txt0000664000175000017500000000001315121004507021022 0ustar00zuulzuuloslo_limit ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6538484 oslo_limit-2.9.2/oslo_limit/0000775000175000017500000000000015121004510014657 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/__init__.py0000664000175000017500000000000015121004454016765 0ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/_i18n.py0000664000175000017500000000147215121004454016162 0ustar00zuulzuul# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html . """ import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='oslo_limit') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/exception.py0000664000175000017500000000470215121004454017241 0ustar00zuulzuul# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
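"""Exceptions raised by oslo.limit.

ProjectOverLimit is raised by the enforcer when a claim would push a
project's usage over one or more limits; it wraps a list of OverLimitInfo
objects, one per resource that exceeded its limit. SessionInitError is
raised when the OpenStackSDK session to keystone cannot be initialized
from the [oslo_limit] configuration options.
"""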
from oslo_limit._i18n import _ class ProjectOverLimit(Exception): def __init__( self, project_id: str | None, over_limit_info_list: list['OverLimitInfo'], ) -> None: """Exception raised when a project goes over one or more limits :param project_id: the project id :param over_limit_info_list: list of OverLimitInfo objects """ if not isinstance(over_limit_info_list, list): raise ValueError(over_limit_info_list) if len(over_limit_info_list) == 0: raise ValueError(over_limit_info_list) for info in over_limit_info_list: if not isinstance(info, OverLimitInfo): raise ValueError(over_limit_info_list) self.project_id = project_id self.over_limit_info_list = over_limit_info_list msg = _("Project %(project_id)s is over a limit for %(limits)s") % { 'project_id': project_id, 'limits': over_limit_info_list, } super().__init__(msg) class OverLimitInfo: def __init__( self, resource_name: str, limit: int, current_usage: int, delta: int, ): self.resource_name = resource_name self.limit = int(limit) self.current_usage = int(current_usage) self.delta = int(delta) def __str__(self) -> str: template = ( "Resource %s is over limit of %s due to " "current usage %s and delta %s" ) return template % ( self.resource_name, self.limit, self.current_usage, self.delta, ) def __repr__(self) -> str: return self.__str__() class SessionInitError(Exception): def __init__(self, reason: object) -> None: msg = _("Can't initialise OpenStackSDK session: %(reason)s.") % { 'reason': reason } super().__init__(msg) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/fixture.py0000664000175000017500000001342515121004454016733 0ustar00zuulzuul# Copyright 2021 Red Hat, Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
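"""Test fixture for code that uses oslo.limit.

LimitFixture patches the module-level keystone connection used by
oslo_limit.limit with a mock, forces the flat enforcement model, and
serves registered and per-project limits from the plain dictionaries
passed to it, so tests can exercise limit.Enforcer without contacting
keystone.
"""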
from collections.abc import Generator from typing import Any from unittest import mock import fixtures as fixtures from openstack.identity.v3 import endpoint as _endpoint from openstack.identity.v3 import limit as _limit from openstack.identity.v3 import region as _region from openstack.identity.v3 import registered_limit as _registered_limit from openstack.identity.v3 import service as _service from openstack.test import fakes as sdk_fakes def _fake_services( **query: dict[str, Any], ) -> Generator[_service.Service, None, None]: # we are the only ones calling this, so we know exactly what we should be # calling it with assert set(query) == {'type', 'name'} yield sdk_fakes.generate_fake_resource( _service.Service, type=query['type'], name=query['name'] ) def _fake_region( region: str | _region.Region, ) -> _region.Region: if isinstance(region, str): region_id = region else: region_id = region.id return sdk_fakes.generate_fake_resource(_region.Region, id=region_id) def _fake_endpoints( **query: dict[str, Any], ) -> Generator[_endpoint.Endpoint, None, None]: # we are the only ones calling this, so we know exactly what we should be # calling it with assert set(query) == {'service_id', 'region_id', 'interface'} yield sdk_fakes.generate_fake_resource( _endpoint.Endpoint, service_id=query['service_id'], region_id=query['region_id'], interface=query['interface'], ) class LimitFixture(fixtures.Fixture): def __init__( self, reglimits: dict[str, int], projlimits: dict[str, dict[str, int]], ) -> None: """A fixture for testing code that relies on Keystone Unified Limits. :param reglimits: A dictionary of {resource_name: limit} values to simulate registered limits in keystone. :type reglimits: dict :param projlimits: A dictionary of dictionaries defining per-project limits like {project_id: {resource_name: limit}}. As in reality, only per-project overrides need be provided here; any unmentioned projects or resources will take the registered limit defaults. 
:type projlimits: dict """ self.reglimits = reglimits self.projlimits = projlimits def get_reglimit_objects( self, service_id: str | None = None, region_id: str | None = None, resource_name: str | None = None, ) -> list[_registered_limit.RegisteredLimit]: registered_limits = [] for name, value in self.reglimits.items(): if resource_name and resource_name != name: continue registered_limit = sdk_fakes.generate_fake_resource( _registered_limit.RegisteredLimit, resource_name=name, default_limit=value, ) registered_limits.append(registered_limit) return registered_limits def get_projlimit_objects( self, service_id: str | None = None, region_id: str | None = None, resource_name: str | None = None, project_id: str | None = None, ) -> list[_limit.Limit]: limits = [] for proj_id, limit_dict in self.projlimits.items(): if project_id and project_id != proj_id: continue for name, value in limit_dict.items(): if resource_name and resource_name != name: continue limit = sdk_fakes.generate_fake_resource( _limit.Limit, resource_name=name, resource_limit=value, project_id=proj_id, ) limits.append(limit) return limits def setUp(self) -> None: super().setUp() # We mock our own cached connection to Keystone self.mock_conn = mock.MagicMock() self.useFixture( fixtures.MockPatch( 'oslo_limit.limit._SDK_CONNECTION', new=self.mock_conn ) ) # Use a flat enforcement model mock_gem = self.useFixture( fixtures.MockPatch( 'oslo_limit.limit.Enforcer._get_enforcement_model' ) ).mock mock_gem.return_value = 'flat' # Fake keystone endpoint; this can be requested by ID or by name and we # need to handle both. First, requests by ID fake_endpoint = sdk_fakes.generate_fake_resource( _endpoint.Endpoint, service_id='service_id', region_id='region_id', ) self.mock_conn.get_endpoint.return_value = fake_endpoint # Then, requests by name self.mock_conn.services.side_effect = _fake_services self.mock_conn.get_region.side_effect = _fake_region self.mock_conn.endpoints.side_effect = _fake_endpoints # Finally, fake the actual limits and registered limits calls self.mock_conn.limits.side_effect = self.get_projlimit_objects self.mock_conn.registered_limits.side_effect = ( self.get_reglimit_objects ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/limit.py0000664000175000017500000005502415121004454016364 0ustar00zuulzuul# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
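"""Limit enforcement built on keystone unified limits.

Enforcer is the public entry point: given a usage callback, it looks up the
enforcement model configured in keystone (currently only the "flat" model is
implemented), fetches registered and per-project limits for this service's
endpoint, and compares proposed deltas plus current usage against those
limits, raising exception.ProjectOverLimit when a claim would exceed them.
"""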
from collections.abc import Callable, Collection from collections import defaultdict, namedtuple from typing import cast, Protocol, TypeAlias from keystoneauth1 import exceptions as ksa_exceptions from keystoneauth1 import loading from openstack import connection from openstack import exceptions as os_exceptions from openstack.identity.v3 import _proxy as _identity_proxy from openstack.identity.v3 import endpoint as _endpoint from openstack.identity.v3 import limit as _limit from openstack.identity.v3 import registered_limit as _registered_limit from oslo_config import cfg from oslo_log import log from oslo_limit import exception from oslo_limit import opts CONF = cfg.CONF LOG = log.getLogger(__name__) _SDK_CONNECTION: _identity_proxy.Proxy | None = None ProjectUsage = namedtuple('ProjectUsage', ['limit', 'usage']) UsageCallbackT: TypeAlias = Callable[ [str | None, Collection[str]], dict[str, int] ] opts.register_opts(CONF) class _EnforcerImplProtocol(Protocol): name: str def __init__( self, usage_callback: UsageCallbackT, cache: bool = True ) -> None: ... def get_registered_limits( self, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: ... def get_project_limits( self, project_id: str | None, resource_names: Collection[str] ) -> list[tuple[str, int]]: ... def get_project_usage( self, project_id: str | None, resources_to_check: Collection[str] ) -> dict[str, int]: ... def enforce( self, project_id: str | None, deltas: dict[str, int] ) -> None: ... def _get_keystone_connection() -> _identity_proxy.Proxy: global _SDK_CONNECTION if not _SDK_CONNECTION: try: auth = loading.load_auth_from_conf_options( CONF, group='oslo_limit' ) session = loading.load_session_from_conf_options( CONF, group='oslo_limit', auth=auth ) ksa_opts = loading.get_adapter_conf_options( include_deprecated=False ) conn_kwargs = {} for opt in ksa_opts: if opt.dest != 'valid_interfaces': conn_kwargs['identity_' + opt.dest] = getattr( CONF.oslo_limit, opt.dest ) conn_kwargs['identity_interface'] = ( CONF.oslo_limit.valid_interfaces ) _SDK_CONNECTION = connection.Connection( session=session, **conn_kwargs ).identity except ( ksa_exceptions.NoMatchingPlugin, ksa_exceptions.MissingRequiredOptions, ksa_exceptions.MissingAuthPlugin, ksa_exceptions.DiscoveryFailure, ksa_exceptions.Unauthorized, ) as e: msg = f'Unable to initialize OpenStackSDK session: {e}' LOG.error(msg) raise exception.SessionInitError(e) return _SDK_CONNECTION class Enforcer: model: _EnforcerImplProtocol def __init__( self, usage_callback: UsageCallbackT, cache: bool = True ) -> None: """An object for checking usage against resource limits and requests. :param usage_callback: A callable function that accepts a project_id string as a parameter and calculates the current usage of a resource. :param cache: Whether to cache resource limits for the lifetime of this enforcer. Defaults to True. """ if not callable(usage_callback): msg = 'usage_callback must be a callable function.' 
raise ValueError(msg) self.connection = _get_keystone_connection() self.model = self._get_model_impl(usage_callback, cache=cache) def _get_enforcement_model(self) -> str: """Query keystone for the configured enforcement model.""" return self.connection.get('/limits/model').json()['model']['name'] # type: ignore def _get_model_impl( self, usage_callback: UsageCallbackT, cache: bool = True ) -> _EnforcerImplProtocol: """get the enforcement model based on configured model in keystone.""" model = self._get_enforcement_model() for impl in _MODELS: if model == impl.name: return impl(usage_callback, cache=cache) raise ValueError(f"enforcement model {model} is not supported") def enforce(self, project_id: str | None, deltas: dict[str, int]) -> None: """Check resource usage against limits for resources in deltas From the deltas we extract the list of resource types that need to have limits enforced on them. From keystone we fetch limits relating to this project_id (if not None) and the endpoint specified in the configuration. Using the usage_callback specified when creating the enforcer, we fetch the existing usage. We then add the existing usage to the provided deltas to get the total proposed usage. This total proposed usage is then compared against all appropriate limits that apply. Note if there are no registered limits for a given resource type, we fail the enforce in the same way as if we defaulted to a limit of zero, i.e. do not allow any use of a resource type that does not have a registered limit. Note that if a project_id of None is provided, we just compare against the registered limits (i.e. use this for non-project-scoped limits) :param project_id: The project to check usage and enforce limits against (or None). :param deltas: An dictionary containing resource names as keys and requests resource quantities as positive integers. We only check limits for resources in deltas. Specify a quantity of zero to check current usage. :raises exception.ClaimExceedsLimit: when over limits """ if project_id is not None and ( not project_id or not isinstance(project_id, str) ): msg = 'project_id must be a non-empty string or None.' raise ValueError(msg) if not isinstance(deltas, dict) or len(deltas) == 0: msg = 'deltas must be a non-empty dictionary.' raise ValueError(msg) for k, v in deltas.items(): if not isinstance(k, str): raise ValueError('resource name is not a string.') elif not isinstance(v, int): raise ValueError('resource limit is not an integer.') self.model.enforce(project_id, deltas) def calculate_usage( self, project_id: str | None, resources_to_check: Collection[str] ) -> dict[str, ProjectUsage]: """Calculate resource usage and limits for resources_to_check. From the list of resources_to_check, we collect the project's limit and current usage for each, exactly like we would for enforce(). This is useful for reporting current project usage and limits in a situation where enforcement is not desired. This should *not* be used to conduct custom enforcement, but rather only for reporting. :param project_id: The project for which to check usage and limits, or None. :param resources_to_check: A list of resource names to query. :returns: A dictionary of name:limit.ProjectUsage for the requested names against the provided project. """ if project_id is not None and ( not project_id or not isinstance(project_id, str) ): msg = 'project_id must be a non-empty string or None.' 
raise ValueError(msg) msg = ( 'resources_to_check must be non-empty sequence of ' 'resource name strings' ) try: if len(resources_to_check) == 0: raise ValueError(msg) except TypeError: raise ValueError(msg) for resource_name in resources_to_check: if not isinstance(resource_name, str): raise ValueError(msg) limits = self.model.get_project_limits(project_id, resources_to_check) usage = self.model.get_project_usage(project_id, resources_to_check) return { resource: ProjectUsage(limit, usage[resource]) for resource, limit in limits } def get_registered_limits( self, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: return self.model.get_registered_limits(resources_to_check) def get_project_limits( self, project_id: str | None, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: return self.model.get_project_limits(project_id, resources_to_check) class _FlatEnforcer: name = 'flat' def __init__( self, usage_callback: UsageCallbackT, cache: bool = True ) -> None: self._usage_callback = usage_callback self._utils = _EnforcerUtils(cache=cache) def get_registered_limits( self, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: return self._utils.get_registered_limits(resources_to_check) def get_project_limits( self, project_id: str | None, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: return self._utils.get_project_limits(project_id, resources_to_check) def get_project_usage( self, project_id: str | None, resources_to_check: Collection[str] ) -> dict[str, int]: return self._usage_callback(project_id, resources_to_check) def enforce(self, project_id: str | None, deltas: dict[str, int]) -> None: resources_to_check = list(deltas.keys()) # Always check the limits in the same order, for predictable errors resources_to_check.sort() project_limits = self.get_project_limits( project_id, resources_to_check ) current_usage = self.get_project_usage(project_id, resources_to_check) self._utils.enforce_limits( project_id, project_limits, current_usage, deltas ) class _StrictTwoLevelEnforcer: name = 'strict-two-level' def __init__( self, usage_callback: UsageCallbackT, cache: bool = True ) -> None: self._usage_callback = usage_callback def get_registered_limits( self, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: raise NotImplementedError() def get_project_limits( self, project_id: str | None, resources_to_check: Collection[str] ) -> list[tuple[str, int]]: raise NotImplementedError() def get_project_usage( self, project_id: str | None, resources_to_check: Collection[str] ) -> dict[str, int]: raise NotImplementedError() def enforce(self, project_id: str | None, deltas: dict[str, int]) -> None: raise NotImplementedError() _MODELS: list[type[_EnforcerImplProtocol]] = [ _FlatEnforcer, _StrictTwoLevelEnforcer, ] class _LimitNotFound(Exception): def __init__(self, resource: str) -> None: msg = f"Can't find the limit for resource {resource}" self.resource = resource super().__init__(msg) class _EnforcerUtils: """Logic common used by multiple enforcers""" def __init__(self, cache: bool = True) -> None: self.connection = _get_keystone_connection() self.should_cache = cache # {project_id: {resource_name: project_limit}} self.plimit_cache: dict[str, dict[str, _limit.Limit]] = defaultdict( dict ) # {resource_name: registered_limit} self.rlimit_cache: dict[str, _registered_limit.RegisteredLimit] = {} self._endpoint: _endpoint.Endpoint = self._get_endpoint() self._service_id: str = self._endpoint.service_id self._region_id: str = 
self._endpoint.region_id def _get_endpoint(self) -> _endpoint.Endpoint: endpoint = self._get_endpoint_by_id() if endpoint is not None: return endpoint return self._get_endpoint_by_service_lookup() def _get_endpoint_by_id(self) -> _endpoint.Endpoint | None: endpoint_id = CONF.oslo_limit.endpoint_id if endpoint_id is None: return None try: endpoint = self.connection.get_endpoint(endpoint_id) # type: ignore except os_exceptions.ResourceNotFound: raise ValueError(f"Can't find endpoint for {endpoint_id}") return cast(_endpoint.Endpoint, endpoint) def _get_endpoint_by_service_lookup(self) -> _endpoint.Endpoint: service_type = CONF.oslo_limit.endpoint_service_type service_name = CONF.oslo_limit.endpoint_service_name if not service_type and not service_name: raise ValueError( "Either service_type or service_name should be set" ) # find service services = self.connection.services( # type: ignore type=service_type, name=service_name ) services = list(services) if len(services) > 1: raise ValueError("Multiple services found") if len(services) == 0: raise ValueError("Service not found") service_id = services[0].id # find region (if any) if CONF.oslo_limit.endpoint_region_name is not None: try: region = self.connection.get_region( # type: ignore CONF.oslo_limit.endpoint_region_name ) except os_exceptions.ResourceNotFound: raise ValueError("Region not found") region_id = region.id else: region_id = None # find endpoint interface = CONF.oslo_limit.endpoint_interface if interface.endswith('URL'): interface = interface[:-3] endpoints = self.connection.endpoints( # type: ignore service_id=service_id, region_id=region_id, interface=interface, ) endpoints = list(endpoints) if len(endpoints) > 1: raise ValueError("Multiple endpoints found") if len(endpoints) == 0: raise ValueError("Endpoint not found") return cast(_endpoint.Endpoint, endpoints[0]) @staticmethod def enforce_limits( project_id: str | None, limits: Collection[tuple[str, int]], current_usage: dict[str, int], deltas: dict[str, int], ) -> None: """Check that proposed usage is not over given limits :param project_id: project being checked or None :param limits: list of (resource_name,limit) pairs :param current_usage: dict of resource name and current usage :param deltas: dict of resource name and proposed additional usage :raises exception.ClaimExceedsLimit: raise if over limit """ over_limit_list = [] for resource_name, limit in limits: if resource_name not in current_usage: msg = f"unable to get current usage for {resource_name}" raise ValueError(msg) current = int(current_usage[resource_name]) delta = int(deltas[resource_name]) proposed_usage_total = current + delta if proposed_usage_total > limit: over_limit_list.append( exception.OverLimitInfo( resource_name, limit, current, delta ) ) if len(over_limit_list) > 0: LOG.debug("hit limit for project: %s", over_limit_list) raise exception.ProjectOverLimit(project_id, over_limit_list) def _get_registered_limits(self) -> list[tuple[str, int]]: registered_limits = [] reg_limits = self.connection.registered_limits( # type: ignore service_id=self._service_id, region_id=self._region_id ) for reg_limit in reg_limits: name, limit = reg_limit.resource_name, reg_limit.default_limit registered_limits.append((name, limit)) if self.should_cache: self.rlimit_cache[name] = reg_limit return registered_limits def get_registered_limits( self, resource_names: Collection[str] | None ) -> list[tuple[str, int]]: """Get all the default limits for a given resource name list :param resource_names: list of resource_name 
strings :return: list of (resource_name, limit) pairs """ # If None was passed for resource_names, get and return all of the # registered limits. if resource_names is None: return self._get_registered_limits() # Using a list to preserve the resource_name order registered_limits = [] for resource_name in resource_names: reg_limit = self._get_registered_limit(resource_name) if reg_limit: limit = reg_limit.default_limit else: limit = 0 registered_limits.append((resource_name, limit)) return registered_limits def _get_project_limits(self, project_id: str) -> list[tuple[str, int]]: project_limits = [] proj_limits = self.connection.limits( # type: ignore service_id=self._service_id, region_id=self._region_id, project_id=project_id, ) for proj_limit in proj_limits: name, limit = proj_limit.resource_name, proj_limit.resource_limit project_limits.append((name, limit)) if self.should_cache: self.plimit_cache[project_id][name] = proj_limit return project_limits def get_project_limits( self, project_id: str | None, resource_names: Collection[str] | None ) -> list[tuple[str, int]]: """Get all the limits for a given project and resource_name list If a limit is not found, it will be considered to be zero (i.e. no quota) :param project_id: project being checked or None :param resource_names: list of resource_name strings :return: list of (resource_name, limit) pairs """ # If None was passed for resource_names, get and return all of the # limits. if resource_names is None: if project_id is None: # If we were to pass None, we would receive limits for all # projects and we would have to return {project_id: [(name, # limit), ...]} which would be inconsistent with the return # format of the other methods. raise ValueError('project_id must not be None') return self._get_project_limits(project_id) # Using a list to preserve the resource_name order project_limits = [] for resource_name in resource_names: try: limit = self._get_limit(project_id, resource_name) except _LimitNotFound: limit = 0 project_limits.append((resource_name, limit)) return project_limits def _get_limit(self, project_id: str | None, resource_name: str) -> int: # If we are configured to cache limits, look in the cache first and use # the cached value if there is one. Else, retrieve the limit and add it # to the cache. Do this for both project limits and registered limits. # Look for a project limit first. project_limit = ( self._get_project_limit(project_id, resource_name) if project_id is not None else None ) if project_limit: return cast(int, project_limit.resource_limit) # If there is no project limit, look for a registered limit. registered_limit = self._get_registered_limit(resource_name) if registered_limit: return cast(int, registered_limit.default_limit) LOG.error( "Unable to find registered limit for resource " "%(resource)s for %(service)s in region %(region)s.", { "resource": resource_name, "service": self._service_id, "region": self._region_id, }, exc_info=False, ) raise _LimitNotFound(resource_name) def _get_project_limit( self, project_id: str, resource_name: str ) -> _limit.Limit | None: # Look in the cache first. if ( project_id in self.plimit_cache and resource_name in self.plimit_cache[project_id] ): return self.plimit_cache[project_id][resource_name] # Get the limits from keystone.
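# The query below is scoped to this enforcer's service and region endpoint and to the given project; when caching is enabled, every limit returned is stored in plimit_cache so later lookups can skip the round trip.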
limits = self.connection.limits( # type: ignore service_id=self._service_id, region_id=self._region_id, project_id=project_id, ) limit = None for pl in limits: # NOTE(melwitt): If project_id None was passed in, it's possible # there will be multiple limits for the same resource (from various # projects), so keep the existing oslo.limit behavior and return # the first one we find. This could be considered to be a bug. if limit is None and pl.resource_name == resource_name: limit = pl if self.should_cache: self.plimit_cache[project_id][pl.resource_name] = pl return limit def _get_registered_limit( self, resource_name: str ) -> _registered_limit.RegisteredLimit | None: # Look in the cache first. if resource_name in self.rlimit_cache: return self.rlimit_cache[resource_name] # Get the limits from keystone. reg_limits = self.connection.registered_limits( # type: ignore service_id=self._service_id, region_id=self._region_id ) reg_limit = None for rl in reg_limits: if rl.resource_name == resource_name: reg_limit = rl # Cache the limit if configured. if self.should_cache: self.rlimit_cache[rl.resource_name] = rl return reg_limit ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/opts.py0000664000175000017500000000536215121004454016233 0ustar00zuulzuul# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from typing import Any from keystoneauth1 import loading from oslo_config import cfg from oslo_limit._i18n import _ __all__ = [ 'list_opts', 'register_opts', ] CONF = cfg.CONF _options = [ cfg.StrOpt( 'endpoint_id', help=_("The service's endpoint id which is registered in Keystone."), ), cfg.StrOpt( 'endpoint_service_name', help=_("Service name for endpoint discovery") ), cfg.StrOpt( 'endpoint_service_type', help=_("Service type for endpoint discovery") ), cfg.StrOpt( 'endpoint_region_name', help=_("Region to which the endpoint belongs") ), cfg.StrOpt( 'endpoint_interface', default='publicURL', choices=[ 'public', 'publicURL', 'internal', 'internalURL', 'admin', 'adminURL', ], help=_("The interface for endpoint discovery"), ), ] _option_group = 'oslo_limit' def list_opts() -> list[tuple[str, list[cfg.Opt]]]: """Return a list of oslo.config options available in the library. 
:returns: a list of (group_name, opts) tuples """ return [ ( _option_group, copy.deepcopy(_options) + loading.get_session_conf_options() + loading.get_auth_plugin_conf_options('password') + loading.get_auth_plugin_conf_options('v2password') + loading.get_auth_plugin_conf_options('v3password') + loading.get_adapter_conf_options(include_deprecated=False), ) ] def register_opts(conf: cfg.ConfigOpts) -> None: loading.register_session_conf_options(CONF, _option_group) loading.register_adapter_conf_options( CONF, _option_group, include_deprecated=False ) loading.register_auth_conf_options(CONF, _option_group) plugin_name = CONF.oslo_limit.auth_type if plugin_name: plugin_loader: loading.BaseLoader[Any] plugin_loader = loading.get_plugin_loader(plugin_name) plugin_opts = loading.get_auth_plugin_conf_options(plugin_loader) CONF.register_opts(plugin_opts, group=_option_group) conf.register_opts(_options, group=_option_group) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/py.typed0000664000175000017500000000000015121004454016353 0ustar00zuulzuul././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6538484 oslo_limit-2.9.2/oslo_limit/tests/0000775000175000017500000000000015121004510016021 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/tests/__init__.py0000664000175000017500000000000015121004454020127 0ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/tests/test_fixture.py0000664000175000017500000000732615121004454021137 0ustar00zuulzuul# Copyright 2021 Red Hat, Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_limit import exception from oslo_limit import fixture from oslo_limit import limit from oslo_limit import opts CONF = cfg.CONF class TestFixture(base.BaseTestCase): def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config( group='oslo_limit', endpoint_id='ENDPOINT_ID' ) opts.register_opts(CONF) # Set up some default projects, registered limits, # and project limits reglimits = {'widgets': 100, 'sprockets': 50} projlimits = { 'project2': {'widgets': 10}, } self.useFixture(fixture.LimitFixture(reglimits, projlimits)) # Some fake usage for projects self.usage = { 'project1': {'sprockets': 10, 'widgets': 10}, 'project2': {'sprockets': 3, 'widgets': 3}, } def proj_usage(project_id, resource_names): return self.usage[project_id] # An enforcer to play with self.enforcer = limit.Enforcer(proj_usage) def test_project_under_registered_limit_only(self): # Project1 has quota of 50 and 100 each, so no problem with # 10+1 usage. 
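# Proposed usage = current usage (10) + delta (1) = 11 for each resource, well within the registered limits of 50 and 100, so this call must not raise.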
self.enforcer.enforce('project1', {'sprockets': 1, 'widgets': 1}) def test_project_over_registered_limit_only(self): # Project1 has quota of 100 widgets, usage of 112 is over # quota. self.assertRaises( exception.ProjectOverLimit, self.enforcer.enforce, 'project1', {'sprockets': 1, 'widgets': 102}, ) def test_project_over_registered_limit(self): # delta=1 should be under the registered limit of 50 self.enforcer.enforce('project2', {'sprockets': 1}) # delta=50 should be over the registered limit of 50 self.assertRaises( exception.ProjectOverLimit, self.enforcer.enforce, 'project2', {'sprockets': 50}, ) def test_project_over_project_limits(self): # delta=7 is usage=10, right at our project limit of 10 self.enforcer.enforce('project2', {'widgets': 7}) # delta=10 is usage 13, over our project limit of 10 self.assertRaises( exception.ProjectOverLimit, self.enforcer.enforce, 'project2', {'widgets': 10}, ) def test_calculate_usage(self): # Make sure the usage calculator works with the fixture too u = self.enforcer.calculate_usage('project2', ['widgets']) self.assertEqual(3, u['widgets'].usage) self.assertEqual(10, u['widgets'].limit) u = self.enforcer.calculate_usage('project1', ['widgets', 'sprockets']) self.assertEqual(10, u['sprockets'].usage) self.assertEqual(10, u['widgets'].usage) # Since project1 has no project limits, make sure we get the # registered limit values self.assertEqual(50, u['sprockets'].limit) self.assertEqual(100, u['widgets'].limit) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/oslo_limit/tests/test_limit.py0000664000175000017500000010533615121004454020567 0ustar00zuulzuul# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_limit ---------------------------------- Tests for `limit` module. 
""" from collections.abc import Iterable from typing import Any from unittest import mock import uuid from openstack import exceptions as os_exceptions from openstack.identity.v3 import endpoint from openstack.identity.v3 import limit as klimit from openstack.identity.v3 import region from openstack.identity.v3 import registered_limit from openstack.identity.v3 import service from oslo_config import cfg from oslo_config import fixture as config_fixture from oslotest import base from oslo_limit import exception from oslo_limit import fixture from oslo_limit import limit from oslo_limit import opts CONF = cfg.CONF class TestEnforcer(base.BaseTestCase): def setUp(self): super().setUp() self.deltas = dict() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config(group='oslo_limit', auth_type='password') self.config_fixture.config( group='oslo_limit', endpoint_id='ENDPOINT_ID' ) opts.register_opts(CONF) self.config_fixture.config( group='oslo_limit', auth_url='http://identity.example.com' ) limit._SDK_CONNECTION = mock.MagicMock() json = mock.MagicMock() json.json.return_value = {"model": {"name": "flat"}} limit._SDK_CONNECTION.get.return_value = json def _get_usage_for_project(self, project_id, resource_names): return {"a": 1} def test_usage_callback_must_be_callable(self): invalid_callback_types = [uuid.uuid4().hex, 5, 5.1] for invalid_callback in invalid_callback_types: self.assertRaises(ValueError, limit.Enforcer, invalid_callback) def test_deltas_must_be_a_dictionary(self): project_id = uuid.uuid4().hex invalid_delta_types = [uuid.uuid4().hex, 5, 5.1, True, [], None, {}] enforcer = limit.Enforcer(self._get_usage_for_project) for invalid_delta in invalid_delta_types: self.assertRaises( ValueError, enforcer.enforce, project_id, invalid_delta ) def test_project_id_must_be_a_string(self): enforcer = limit.Enforcer(self._get_usage_for_project) invalid_delta_types = [{}, 5, 5.1, True, False, [], None, ""] for invalid_project_id in invalid_delta_types: self.assertRaises( ValueError, enforcer.enforce, invalid_project_id, {} ) def test_set_model_impl(self): enforcer = limit.Enforcer(self._get_usage_for_project) self.assertIsInstance(enforcer.model, limit._FlatEnforcer) def test_get_model_impl(self): json = mock.MagicMock() limit._SDK_CONNECTION.get.return_value = json # type: ignore json.json.return_value = {"model": {"name": "flat"}} enforcer = limit.Enforcer(self._get_usage_for_project) flat_impl = enforcer._get_model_impl(self._get_usage_for_project) self.assertIsInstance(flat_impl, limit._FlatEnforcer) json.json.return_value = {"model": {"name": "strict-two-level"}} flat_impl = enforcer._get_model_impl(self._get_usage_for_project) self.assertIsInstance(flat_impl, limit._StrictTwoLevelEnforcer) json.json.return_value = {"model": {"name": "foo"}} e = self.assertRaises( ValueError, enforcer._get_model_impl, self._get_usage_for_project ) self.assertEqual("enforcement model foo is not supported", str(e)) @mock.patch.object(limit._FlatEnforcer, "enforce") def test_enforce(self, mock_enforce): enforcer = limit.Enforcer(self._get_usage_for_project) project_id = uuid.uuid4().hex deltas = {"a": 1} enforcer.enforce(project_id, deltas) mock_enforce.assert_called_once_with(project_id, deltas) @mock.patch.object(limit._EnforcerUtils, "get_project_limits") def test_calculate_usage(self, mock_get_limits): mock_usage = mock.MagicMock() mock_usage.return_value = {'a': 1, 'b': 2} project_id = uuid.uuid4().hex mock_get_limits.return_value = [('a', 10), ('b', 5)] expected = { 
'a': limit.ProjectUsage(10, 1), 'b': limit.ProjectUsage(5, 2), } enforcer = limit.Enforcer(mock_usage) self.assertEqual( expected, enforcer.calculate_usage(project_id, ['a', 'b']) ) @mock.patch.object(limit._EnforcerUtils, "_get_project_limit") @mock.patch.object(limit._EnforcerUtils, "_get_registered_limit") def test_calculate_and_enforce_some_missing( self, mock_get_reglimit, mock_get_limit ): # Registered and project limits for a and b, c is unregistered reg_limits = { 'a': mock.MagicMock(default_limit=10), 'b': mock.MagicMock(default_limit=10), } prj_limits = {('bar', 'b'): mock.MagicMock(resource_limit=6)} mock_get_reglimit.side_effect = lambda r: reg_limits.get(r) mock_get_limit.side_effect = lambda p, r: prj_limits.get((p, r)) # Regardless, we have usage for all three mock_usage = mock.MagicMock() mock_usage.return_value = {'a': 5, 'b': 5, 'c': 5} enforcer = limit.Enforcer(mock_usage) # When we calculate usage, we should expect the default limit # of zero for the unregistered limit expected = { 'a': limit.ProjectUsage(10, 5), 'b': limit.ProjectUsage(6, 5), 'c': limit.ProjectUsage(0, 5), } self.assertEqual( expected, enforcer.calculate_usage('bar', ['a', 'b', 'c']) ) # Make sure that if we enforce, we get the expected behavior # of c being considered to be zero self.assertRaises( exception.ProjectOverLimit, enforcer.enforce, 'bar', {'a': 1, 'b': 0, 'c': 1}, ) def test_calculate_usage_bad_params(self): enforcer = limit.Enforcer(mock.MagicMock()) # Non-string project_id self.assertRaises(ValueError, enforcer.calculate_usage, 123, ['foo']) # Zero-length resources_to_check self.assertRaises(ValueError, enforcer.calculate_usage, 'project', []) # Non-sequence resources_to_check self.assertRaises(ValueError, enforcer.calculate_usage, 'project', 123) # Invalid non-string value in resources_to_check self.assertRaises( ValueError, enforcer.calculate_usage, 'project', ['a', 123, 'b'] ) @mock.patch.object(limit._EnforcerUtils, "get_registered_limits") def test_get_registered_limits(self, mock_get_limits): mock_get_limits.return_value = [("a", 1), ("b", 0), ("c", 2)] enforcer = limit.Enforcer(lambda: None) # type: ignore limits = enforcer.get_registered_limits(["a", "b", "c"]) mock_get_limits.assert_called_once_with(["a", "b", "c"]) self.assertEqual(mock_get_limits.return_value, limits) @mock.patch.object(limit._EnforcerUtils, "get_project_limits") def test_get_project_limits(self, mock_get_limits): project_id = uuid.uuid4().hex mock_get_limits.return_value = [("a", 1), ("b", 0), ("c", 2)] enforcer = limit.Enforcer(lambda: None) # type: ignore limits = enforcer.get_project_limits(project_id, ["a", "b", "c"]) mock_get_limits.assert_called_once_with(project_id, ["a", "b", "c"]) self.assertEqual(mock_get_limits.return_value, limits) def test_calculate_usage_cache(self, cache=True): project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture( {'a': 5, 'b': 7, 'c': 8, 'd': 3}, {project_id: {'a': 2, 'b': 4}, 'other': {'a': 1, 'b': 2}}, ) ) mock_usage = mock.MagicMock() mock_usage.return_value = {'a': 1, 'b': 3, 'c': 2, 'd': 0} enforcer = limit.Enforcer(mock_usage, cache=cache) expected = { 'a': limit.ProjectUsage(2, 1), 'b': limit.ProjectUsage(4, 3), 'c': limit.ProjectUsage(8, 2), 'd': limit.ProjectUsage(3, 0), } self.assertEqual( expected, enforcer.calculate_usage(project_id, ['a', 'b', 'c', 'd']), ) # If caching is enabled, there should be three calls to the GET /limits # API: one for 'a' and 'b' and two because of cache misses for 'c' and # 'd' (the project_id has not set a per-project 
limit for 'c' or 'd', # so they will not be cached for the project). # If caching is disabled, there should be four calls to the GET # /limits API, one for each of 'a', 'b', 'c', and 'd'. expected_count = 3 if cache else 4 self.assertEqual(expected_count, fix.mock_conn.limits.call_count) # If caching is enabled, there should be one call to the GET # /registered_limits API for 'c' and 'd'. # If caching is disabled, there should be two calls to the GET # /registered_limits API, one for each of 'c' and 'd'. expected_count = 1 if cache else 2 self.assertEqual( expected_count, fix.mock_conn.registered_limits.call_count ) def test_calculate_usage_no_cache(self): self.test_calculate_usage_cache(cache=False) class TestFlatEnforcer(base.BaseTestCase): def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config( group='oslo_limit', endpoint_id='ENDPOINT_ID' ) opts.register_opts(CONF) self.mock_conn = mock.MagicMock() limit._SDK_CONNECTION = self.mock_conn @mock.patch.object(limit._EnforcerUtils, "get_registered_limits") def test_get_registered_limits(self, mock_get_limits): mock_get_limits.return_value = [("a", 1), ("b", 0), ("c", 2)] enforcer = limit._FlatEnforcer(lambda: None) # type: ignore limits = enforcer.get_registered_limits(["a", "b", "c"]) mock_get_limits.assert_called_once_with(["a", "b", "c"]) self.assertEqual(mock_get_limits.return_value, limits) @mock.patch.object(limit._EnforcerUtils, "get_project_limits") def test_get_project_limits(self, mock_get_limits): project_id = uuid.uuid4().hex mock_get_limits.return_value = [("a", 1), ("b", 0), ("c", 2)] enforcer = limit._FlatEnforcer(lambda: None) # type: ignore limits = enforcer.get_project_limits(project_id, ["a", "b", "c"]) mock_get_limits.assert_called_once_with(project_id, ["a", "b", "c"]) self.assertEqual(mock_get_limits.return_value, limits) @mock.patch.object(limit._EnforcerUtils, "get_project_limits") def test_enforce(self, mock_get_limits): mock_usage = mock.MagicMock() project_id = uuid.uuid4().hex deltas = {"a": 1, "b": 1} mock_get_limits.return_value = [("a", 1), ("b", 2)] mock_usage.return_value = {"a": 0, "b": 1} enforcer = limit._FlatEnforcer(mock_usage) enforcer.enforce(project_id, deltas) self.mock_conn.get_endpoint.assert_called_once_with('ENDPOINT_ID') mock_get_limits.assert_called_once_with(project_id, ["a", "b"]) mock_usage.assert_called_once_with(project_id, ["a", "b"]) @mock.patch.object(limit._EnforcerUtils, "get_project_limits") def test_enforce_raises_on_over(self, mock_get_limits): mock_usage = mock.MagicMock() project_id = uuid.uuid4().hex deltas = {"a": 2, "b": 1} mock_get_limits.return_value = [("a", 1), ("b", 2)] mock_usage.return_value = {"a": 0, "b": 1} enforcer = limit._FlatEnforcer(mock_usage) e = self.assertRaises( exception.ProjectOverLimit, enforcer.enforce, project_id, deltas ) expected = ( "Project %s is over a limit for " "[Resource a is over limit of 1 due to current usage 0 " "and delta 2]" ) self.assertEqual(expected % project_id, str(e)) self.assertEqual(project_id, e.project_id) self.assertEqual(1, len(e.over_limit_info_list)) over_a = e.over_limit_info_list[0] self.assertEqual("a", over_a.resource_name) self.assertEqual(1, over_a.limit) self.assertEqual(0, over_a.current_usage) self.assertEqual(2, over_a.delta) @mock.patch.object(limit._EnforcerUtils, "_get_project_limit") @mock.patch.object(limit._EnforcerUtils, "_get_registered_limit") def test_enforce_raises_on_missing_limit( self, mock_get_reglimit, mock_get_limit ): def 
mock_usage(*a): return {'a': 1, 'b': 1} project_id = uuid.uuid4().hex deltas = {"a": 0, "b": 0} mock_get_reglimit.return_value = None mock_get_limit.return_value = None enforcer = limit._FlatEnforcer(mock_usage) self.assertRaises( exception.ProjectOverLimit, enforcer.enforce, project_id, deltas ) self.assertRaises( exception.ProjectOverLimit, enforcer.enforce, None, deltas ) class TestEnforcerUtils(base.BaseTestCase): def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config( group='oslo_limit', endpoint_id='ENDPOINT_ID' ) opts.register_opts(CONF) self.mock_conn = mock.MagicMock() limit._SDK_CONNECTION = self.mock_conn def test_get_endpoint(self): fake_endpoint = endpoint.Endpoint() self.mock_conn.get_endpoint.return_value = fake_endpoint utils = limit._EnforcerUtils() self.assertEqual(fake_endpoint, utils._endpoint) self.mock_conn.get_endpoint.assert_called_once_with('ENDPOINT_ID') self.mock_conn.services.assert_not_called() self.mock_conn.endpoints.assert_not_called() def test_get_endpoint_no_id(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.mock_conn.get_endpoint.side_effect = ( os_exceptions.ResourceNotFound ) self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_not_called() self.mock_conn.endpoints.assert_not_called() def test_get_endpoint_missing(self): self.mock_conn.get_endpoint.side_effect = ( os_exceptions.ResourceNotFound ) self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_called_once_with('ENDPOINT_ID') self.mock_conn.services.assert_not_called() self.mock_conn.endpoints.assert_not_called() def test_get_endpoint_lookup_without_service_opts(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_not_called() self.mock_conn.endpoints.assert_not_called() def test_get_endpoint_lookup(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) fake_service = service.Service(id='SERVICE_ID') self.mock_conn.services.return_value = [fake_service] fake_endpoint = endpoint.Endpoint() self.mock_conn.endpoints.return_value = [fake_endpoint] utils = limit._EnforcerUtils() self.assertEqual(fake_endpoint, utils._endpoint) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) self.mock_conn.endpoints.assert_called_once_with( service_id='SERVICE_ID', region_id=None, interface='public' ) def test_get_endpoint_lookup_multiple_endpoints(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) fake_service = service.Service(id='SERVICE_ID') self.mock_conn.services.return_value = [fake_service] self.mock_conn.endpoints.return_value = [ endpoint.Endpoint(), endpoint.Endpoint(), ] self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) 
self.mock_conn.endpoints.assert_called_once_with( service_id='SERVICE_ID', region_id=None, interface='public' ) def test_get_endpoint_lookup_endpoint_not_found(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) fake_service = service.Service(id='SERVICE_ID') self.mock_conn.services.return_value = [fake_service] self.mock_conn.endpoints.return_value = [] self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) self.mock_conn.endpoints.assert_called_once_with( service_id='SERVICE_ID', region_id=None, interface='public' ) def test_get_endpoint_lookup_multiple_service(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) self.mock_conn.services.side_effect = [ service.Service(id='SERVICE_ID1'), service.Service(id='SERVICE_ID2'), ] self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) self.mock_conn.endpoints.assert_not_called() def test_get_endpoint_lookup_service_not_found(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) self.mock_conn.services.return_value = [] self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) self.mock_conn.endpoints.assert_not_called() def test_get_endpoint_lookup_with_region(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) self.config_fixture.config( group='oslo_limit', endpoint_region_name='regionOne' ) fake_service = service.Service(id='SERVICE_ID') self.mock_conn.services.return_value = [fake_service] fake_endpoint = endpoint.Endpoint() self.mock_conn.endpoints.return_value = [fake_endpoint] fake_region = region.Region(id='REGION_ID') self.mock_conn.get_region.return_value = fake_region utils = limit._EnforcerUtils() self.assertEqual(fake_endpoint, utils._endpoint) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) self.mock_conn.get_region.assert_called_once_with('regionOne') self.mock_conn.endpoints.assert_called_once_with( service_id='SERVICE_ID', region_id='REGION_ID', interface='public' ) def test_get_endpoint_lookup_with_region_not_found(self): self.config_fixture.config(group='oslo_limit', endpoint_id=None) self.config_fixture.config( group='oslo_limit', endpoint_service_type='SERVICE_TYPE' ) self.config_fixture.config( group='oslo_limit', endpoint_service_name='SERVICE_NAME' ) self.config_fixture.config( group='oslo_limit', endpoint_region_name='regionOne' ) fake_service = 
service.Service(id='SERVICE_ID') self.mock_conn.services.return_value = [fake_service] fake_endpoint = endpoint.Endpoint() self.mock_conn.endpoints.return_value = [fake_endpoint] self.mock_conn.get_region.side_effect = os_exceptions.ResourceNotFound self.assertRaises(ValueError, limit._EnforcerUtils) self.mock_conn.get_endpoint.assert_not_called() self.mock_conn.services.assert_called_once_with( type='SERVICE_TYPE', name='SERVICE_NAME' ) self.mock_conn.get_region.assert_called_once_with('regionOne') self.mock_conn.endpoints.assert_not_called() def test_get_registered_limit_empty(self): self.mock_conn.registered_limits.return_value = iter([]) utils = limit._EnforcerUtils() reg_limit = utils._get_registered_limit("foo") self.assertIsNone(reg_limit) def test_get_registered_limit(self): foo = registered_limit.RegisteredLimit() foo.resource_name = "foo" self.mock_conn.registered_limits.return_value = iter([foo]) utils = limit._EnforcerUtils() reg_limit = utils._get_registered_limit("foo") self.assertEqual(foo, reg_limit) def test_get_registered_limits(self): fake_endpoint = endpoint.Endpoint( service_id='service_id', region_id='region_id' ) self.mock_conn.get_endpoint.return_value = fake_endpoint # a and c have limits, b doesn't have one empty_iterator: Iterable[Any] = iter([]) a = registered_limit.RegisteredLimit() a.resource_name = "a" a.default_limit = 1 a_iterator = iter([a]) c = registered_limit.RegisteredLimit() c.resource_name = "c" c.default_limit = 2 c_iterator = iter([c]) self.mock_conn.registered_limits.side_effect = [ a_iterator, empty_iterator, c_iterator, ] utils = limit._EnforcerUtils() limits = utils.get_registered_limits(["a", "b", "c"]) self.assertEqual([('a', 1), ('b', 0), ('c', 2)], limits) def test_get_project_limits(self): fake_endpoint = endpoint.Endpoint( service_id='service_id', region_id='region_id' ) self.mock_conn.get_endpoint.return_value = fake_endpoint project_id = uuid.uuid4().hex # a is a project limit, b, c and d don't have one empty_iterator: Iterable[Any] = iter([]) a = klimit.Limit() a.resource_name = "a" a.resource_limit = 1 a_iterator = iter([a]) self.mock_conn.limits.side_effect = [ a_iterator, empty_iterator, empty_iterator, empty_iterator, ] # b has a limit, but c and d doesn't, a isn't ever checked b = registered_limit.RegisteredLimit() b.resource_name = "b" b.default_limit = 2 b_iterator = iter([b]) self.mock_conn.registered_limits.side_effect = [ b_iterator, empty_iterator, empty_iterator, ] utils = limit._EnforcerUtils() limits = utils.get_project_limits(project_id, ["a", "b"]) self.assertEqual([('a', 1), ('b', 2)], limits) limits = utils.get_project_limits(project_id, ["c", "d"]) self.assertEqual([('c', 0), ('d', 0)], limits) def test__get_project_limit_cache(self, cache=True): # Registered limit = 5 and project limit = 3 project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture({'foo': 5}, {project_id: {'foo': 3}}) ) utils = limit._EnforcerUtils(cache=cache) foo_limit = utils._get_project_limit(project_id, 'foo') assert foo_limit is not None # narrow type self.assertEqual(3, foo_limit.resource_limit) self.assertEqual(1, fix.mock_conn.limits.call_count) # Second call should be cached, so call_count for project limits should # remain 1. 
When cache is disabled, it should increase to 2 foo_limit = utils._get_project_limit(project_id, 'foo') count = 1 if cache else 2 self.assertEqual(count, fix.mock_conn.limits.call_count) def test__get_project_limit_cache_no_cache(self): self.test__get_project_limit_cache(cache=False) def test__get_registered_limit_cache(self, cache=True): # Registered limit = 5 and project limit = 3 project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture({'foo': 5}, {project_id: {'foo': 3}}) ) utils = limit._EnforcerUtils(cache=cache) foo_limit = utils._get_registered_limit('foo') assert foo_limit is not None # narrow type self.assertEqual(5, foo_limit.default_limit) self.assertEqual(1, fix.mock_conn.registered_limits.call_count) # Second call should be cached, so call_count for project limits should # remain 1. When cache is disabled, it should increase to 2 foo_limit = utils._get_registered_limit('foo') count = 1 if cache else 2 self.assertEqual(count, fix.mock_conn.registered_limits.call_count) def test__get_registered_limit_cache_no_cache(self): self.test__get_registered_limit_cache(cache=False) def test_get_limit_cache(self, cache=True): # No project limit and registered limit = 5 fix = self.useFixture(fixture.LimitFixture({'foo': 5}, {})) project_id = uuid.uuid4().hex utils = limit._EnforcerUtils(cache=cache) foo_limit = utils._get_limit(project_id, 'foo') self.assertEqual(5, foo_limit) self.assertEqual(1, fix.mock_conn.registered_limits.call_count) # Second call should be cached, so call_count for registered limits # should remain 1. When cache is disabled, it should increase to 2 foo_limit = utils._get_limit(project_id, 'foo') self.assertEqual(5, foo_limit) count = 1 if cache else 2 self.assertEqual(count, fix.mock_conn.registered_limits.call_count) # Add a project limit = 1 fix.projlimits[project_id] = {'foo': 1} foo_limit = utils._get_limit(project_id, 'foo') self.assertEqual(1, foo_limit) # Project limits should have been queried 3 times total, once per # _get_limit call self.assertEqual(3, fix.mock_conn.limits.call_count) # Fourth call should be cached, so call_count for project limits should # remain 3. When cache is disabled, it should increase to 4 foo_limit = utils._get_limit(project_id, 'foo') self.assertEqual(1, foo_limit) count = 3 if cache else 4 self.assertEqual(count, fix.mock_conn.limits.call_count) def test_get_limit_no_cache(self): self.test_get_limit_cache(cache=False) def test_get_limit(self): utils = limit._EnforcerUtils(cache=False) mgpl = mock.MagicMock() mgrl = mock.MagicMock() with mock.patch.multiple( utils, _get_project_limit=mgpl, _get_registered_limit=mgrl ): # With a project, we expect the project limit to be # fetched. If present, we never check the registered limit. utils._get_limit('project', 'foo') mgrl.assert_not_called() mgpl.assert_called_once_with('project', 'foo') mgrl.reset_mock() mgpl.reset_mock() # With a project, we expect the project limit to be # fetched. If absent, we check the registered limit. 
mgpl.return_value = None utils._get_limit('project', 'foo') mgrl.assert_called_once_with('foo') mgpl.assert_called_once_with('project', 'foo') mgrl.reset_mock() mgpl.reset_mock() # With no project, we expect to get registered limit but # not project limit utils._get_limit(None, 'foo') mgrl.assert_called_once_with('foo') mgpl.assert_not_called() def test_get_registered_limits_resource_names_none(self): fix = self.useFixture(fixture.LimitFixture({'foo': 5, 'bar': 7}, {})) utils = limit._EnforcerUtils() limits = utils.get_registered_limits(None) self.assertEqual([('foo', 5), ('bar', 7)], limits) fix.mock_conn.registered_limits.assert_called_once() # Call again with resource names to test caching. limits = utils.get_registered_limits(['foo', 'bar']) self.assertEqual([('foo', 5), ('bar', 7)], limits) fix.mock_conn.registered_limits.assert_called_once() def test_get_registered_limits_resource_names_none_no_cache(self): fix = self.useFixture(fixture.LimitFixture({'foo': 5, 'bar': 7}, {})) utils = limit._EnforcerUtils(cache=False) limits = utils.get_registered_limits(None) self.assertEqual([('foo', 5), ('bar', 7)], limits) fix.mock_conn.registered_limits.assert_called_once() # Call again with resource names to test caching. limits = utils.get_registered_limits(['foo', 'bar']) self.assertEqual([('foo', 5), ('bar', 7)], limits) # First call gets all limits, then one call per resource name. self.assertEqual(3, fix.mock_conn.registered_limits.call_count) def test_get_registered_limits_resource_names(self): fix = self.useFixture(fixture.LimitFixture({'foo': 5, 'bar': 7}, {})) utils = limit._EnforcerUtils() limits = utils.get_registered_limits(['foo', 'bar']) self.assertEqual([('foo', 5), ('bar', 7)], limits) fix.mock_conn.registered_limits.assert_called_once() def test_get_registered_limits_resource_names_no_cache(self): fix = self.useFixture(fixture.LimitFixture({'foo': 5, 'bar': 7}, {})) utils = limit._EnforcerUtils(cache=False) limits = utils.get_registered_limits(['foo', 'bar']) self.assertEqual([('foo', 5), ('bar', 7)], limits) self.assertEqual(2, fix.mock_conn.registered_limits.call_count) def test_get_project_limits_resource_names_none_project_id_none(self): # We consider project_id None to be invalid for "get_project_limits" # because it would require us to make the return format for getting # project limits different than the format for registered limits. # [(name, limit), (name, limit), ...] utils = limit._EnforcerUtils() self.assertRaises(ValueError, utils.get_project_limits, None, None) def test_get_project_limits_resource_names_none(self): project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture( {'foo': 5, 'bar': 7}, { project_id: {'foo': 2, 'bar': 4}, 'other': {'foo': 1, 'bar': 2}, }, ) ) utils = limit._EnforcerUtils() limits = utils.get_project_limits(project_id, None) self.assertEqual([('foo', 2), ('bar', 4)], limits) fix.mock_conn.limits.assert_called_once() # Call again with resource names to test caching. 
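# The call above with resource_names=None already cached the project limits, so this lookup should be served from plimit_cache and the mocked limits API should still have been called only once.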
limits = utils.get_project_limits(project_id, ['foo', 'bar']) self.assertEqual([('foo', 2), ('bar', 4)], limits) fix.mock_conn.limits.assert_called_once() def test_get_project_limits_resource_names_none_no_cache(self): project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture( {'foo': 5, 'bar': 7}, { project_id: {'foo': 2, 'bar': 4}, 'other': {'foo': 1, 'bar': 2}, }, ) ) utils = limit._EnforcerUtils(cache=False) limits = utils.get_project_limits(project_id, None) self.assertEqual([('foo', 2), ('bar', 4)], limits) fix.mock_conn.limits.assert_called_once() # Call again with resource names to test caching. limits = utils.get_project_limits(project_id, ['foo', 'bar']) self.assertEqual([('foo', 2), ('bar', 4)], limits) # First call gets all limits, then one call per resource name. self.assertEqual(3, fix.mock_conn.limits.call_count) def test_get_project_limits_resource_names(self): project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture( {'foo': 5, 'bar': 7}, { project_id: {'foo': 2, 'bar': 4}, 'other': {'foo': 1, 'bar': 2}, }, ) ) utils = limit._EnforcerUtils() limits = utils.get_project_limits(project_id, ['foo', 'bar']) self.assertEqual([('foo', 2), ('bar', 4)], limits) fix.mock_conn.limits.assert_called_once() def test_get_project_limits_resource_names_no_cache(self): project_id = uuid.uuid4().hex fix = self.useFixture( fixture.LimitFixture( {'foo': 5, 'bar': 7}, { project_id: {'foo': 2, 'bar': 4}, 'other': {'foo': 1, 'bar': 2}, }, ) ) utils = limit._EnforcerUtils(cache=False) limits = utils.get_project_limits(project_id, ['foo', 'bar']) self.assertEqual([('foo', 2), ('bar', 4)], limits) self.assertEqual(2, fix.mock_conn.limits.call_count) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/pyproject.toml0000664000175000017500000000371515121004454015436 0ustar00zuulzuul[build-system] requires = ["pbr>=6.1.1"] build-backend = "pbr.build" [project] name = "oslo.limit" description = "Limit enforcement library to assist with quota calculation." 
authors = [ {name = "OpenStack", email = "openstack-discuss@lists.openstack.org"}, ] readme = {file = "README.rst", content-type = "text/x-rst"} license = {text = "Apache-2.0"} dynamic = ["version", "dependencies"] requires-python = ">=3.10" classifiers = [ "Environment :: OpenStack", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: Implementation :: CPython", ] [project.urls] Homepage = "https://docs.openstack.org/oslo.limit" Repository = "https://opendev.org/openstack/oslo.limit" [project.entry-points."oslo.config.opts"] "oslo.limit" = "oslo_limit.opts:list_opts" [tool.setuptools] packages = [ "oslo_limit" ] [tool.mypy] python_version = "3.10" show_column_numbers = true show_error_context = true strict = true ignore_missing_imports = true exclude = '(?x)(doc | releasenotes)' [[tool.mypy.overrides]] module = ["oslo_limit.tests.*"] disallow_untyped_calls = false disallow_untyped_defs = false disallow_subclassing_any = false disallow_any_generics = false [tool.ruff] line-length = 79 [tool.ruff.format] quote-style = "preserve" docstring-code-format = true [tool.ruff.lint] select = ["E4", "E5", "E7", "E9", "F", "G", "LOG", "S", "RUF", "UP", "W"] ignore = [ "S101", # asserts are only used for type narrowing ] [tool.ruff.lint.per-file-ignores] "oslo_limit/tests/*" = ["S"] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6498485 oslo_limit-2.9.2/releasenotes/0000775000175000017500000000000015121004510015176 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6548483 oslo_limit-2.9.2/releasenotes/notes/0000775000175000017500000000000015121004510016326 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/bug-1962406-e239d60400c726c8.yaml0000664000175000017500000000037715121004454022711 0ustar00zuulzuul--- fixes: - | `bug 1962406` `_: Fixed the wrong format of options data, which was causing failure with ``oslo-config-generator`` command if the ``oslo.limit`` entry point is included. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/bug-2123895-d46347955768fab1.yaml0000664000175000017500000000032115121004454022720 0ustar00zuulzuul--- fixes: - | `bug 2123895` `_: Fixed the capability to query endpoint id from Keystone, which consistently failed due to internal TypeError. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/drop-python27-support-7c1d29f348060147.yaml0000664000175000017500000000017715121004454025372 0ustar00zuulzuul--- upgrade: - | Support for Python 2.7 has been dropped. The minimum version of Python now supported is Python 3.6. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/enforcer-limit-caching-fb59725aad88b039.yaml0000664000175000017500000000044715121004454025764 0ustar00zuulzuul--- features: - | ``Enforcer`` objects now cache limits by default for the lifetime of the object to provide improved performance when multiple calls of ``enforce()`` are needed. This behavior is controlled by the boolean ``cache`` keyword argument to the ``__init__`` method. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/fix-ignored-ksa-adapter-options-e120fac9a6fd35f7.yaml0000664000175000017500000000013115121004454027670 0ustar00zuulzuul--- fixes: - | Fix ignored keystoneauth adapter options such as valid_interfaces. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/look-up-endpoint-id-from-keystone-9d8419673902c258.yaml0000664000175000017500000000052215121004454027553 0ustar00zuulzuul--- features: - | The following options have been added to the ``[oslo_limit]`` section. When these options are set instead of the ``endpoint_id`` option, endpoint id is looked up from keystone API. - ``endpoint_service_name`` - ``endpoint_service_type`` - ``endpoint_region_name`` - ``endpoint_interface`` ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/pre-stable-version-warning-83dbfc9427a22725.yaml0000664000175000017500000000066115121004454026550 0ustar00zuulzuul--- critical: - | oslo.limit mistakenly released as 1.0.0. This library is still under heavy development and the API isn't stabilized yet so oslo.limit isn't actually ready to be consumed yet. This library is still a pre-1.0. For further reading about this issue please take a look to the mailing list discussion [1] [1] http://lists.openstack.org/pipermail/openstack-discuss/2020-February/012606.html ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/notes/remove-py39-19083e939173e39e.yaml0000664000175000017500000000016615121004454023340 0ustar00zuulzuul--- upgrade: - | Support for Python 3.9 has been removed. Now the minimum python version supported is 3.10. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/releasenotes/source/0000775000175000017500000000000015121004510016476 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/2023.1.rst0000664000175000017500000000021015121004454017755 0ustar00zuulzuul=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/2023.2.rst0000664000175000017500000000020215121004454017757 0ustar00zuulzuul=========================== 2023.2 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/2024.1.rst0000664000175000017500000000021015121004454017756 0ustar00zuulzuul=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/2024.2.rst0000664000175000017500000000020215121004454017760 0ustar00zuulzuul=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/2025.1.rst0000664000175000017500000000020215121004454017760 0ustar00zuulzuul=========================== 2025.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2025.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/2025.2.rst0000664000175000017500000000020215121004454017761 0ustar00zuulzuul=========================== 2025.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2025.2 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/releasenotes/source/_static/0000775000175000017500000000000015121004510020124 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/_static/.placeholder0000664000175000017500000000000015121004454022404 0ustar00zuulzuul././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/releasenotes/source/_templates/0000775000175000017500000000000015121004510020633 5ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/_templates/.placeholder0000664000175000017500000000000015121004454023113 0ustar00zuulzuul././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/conf.py0000664000175000017500000002172515121004454020013 0ustar00zuulzuul# Copyright (C) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Keystone Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'oslo.limit Release Notes' copyright = '2018, OpenStack Foundation' # Release notes are version independent # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'oslo.limitReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( 'index', 'oslo.limitReleaseNotes.tex', 'oslo.limit Release Notes Documentation', 'oslo.limit Developers', 'manual', ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( 'index', 'oslo.limitreleasenotes', 'oslo.limit Release Notes Documentation', ['oslo.limit Developers'], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', 'oslo.limitReleaseNotes', 'oslo.limit Release Notes Documentation', 'oslo.limit Developers', 'oslo.limitReleaseNotes', 'Limit enforcement library for OpenStack.', 'Miscellaneous', ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_repo_name = 'openstack/oslo.limit' openstackdocs_bug_project = 'oslo.limit' openstackdocs_bug_tag = 'doc' openstackdocs_auto_name = False ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/index.rst0000664000175000017500000000037415121004454020352 0ustar00zuulzuul=========================== oslo.limit Release Notes =========================== .. toctree:: :maxdepth: 1 unreleased 2025.2 2025.1 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/train.rst0000664000175000017500000000017615121004454020360 0ustar00zuulzuul========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/unreleased.rst0000664000175000017500000000014415121004454021365 0ustar00zuulzuul========================== Unreleased Release Notes ========================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/ussuri.rst0000664000175000017500000000020215121004454020563 0ustar00zuulzuul=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/victoria.rst0000664000175000017500000000022015121004454021051 0ustar00zuulzuul============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/wallaby.rst0000664000175000017500000000021415121004454020667 0ustar00zuulzuul============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/xena.rst0000664000175000017500000000020015121004454020162 0ustar00zuulzuul========================= Xena Series Release Notes ========================= .. 
release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/yoga.rst0000664000175000017500000000020015121004454020166 0ustar00zuulzuul========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/releasenotes/source/zed.rst0000664000175000017500000000017415121004454020023 0ustar00zuulzuul======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/requirements.txt0000664000175000017500000000060415121004454016000 0ustar00zuulzuul# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. keystoneauth1>=3.9.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.44.0 # Apache-2.0 openstacksdk>=0.47.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1766066503.6558483 oslo_limit-2.9.2/setup.cfg0000664000175000017500000000010415121004510014321 0ustar00zuulzuul[metadata] name = oslo.limit [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/setup.py0000664000175000017500000000117215121004454014227 0ustar00zuulzuul# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/test-requirements.txt0000664000175000017500000000016515121004454016757 0ustar00zuulzuulfixtures>=3.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 coverage>=4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1766066476.0 oslo_limit-2.9.2/tox.ini0000664000175000017500000000334215121004454014031 0ustar00zuulzuul[tox] minversion = 3.18.0 envlist = py3,pep8,docs [testenv] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = stestr run {posargs} stestr slowest [testenv:pep8] description = Run style checks. deps = pre-commit>=2.6.0 # MIT {[testenv:mypy]deps} commands = pre-commit run -a {[testenv:mypy]commands} [testenv:mypy] description = Run type checks. 
deps = {[testenv]deps} mypy commands = mypy --cache-dir="{envdir}/mypy_cache" {posargs:oslo_limit} [testenv:venv] commands = {posargs} [testenv:debug] commands = oslo_debug_helper -t oslo_limit/tests {posargs} [testenv:docs] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt allowlist_externals = rm commands = rm -fr doc/build sphinx-build -W --keep-going -b html doc/source doc/build/html [testenv:cover] setenv = PYTHON=coverage run --source oslo_limit --parallel-mode commands = stestr run --slowest {posargs} coverage combine coverage html -d cover coverage report [testenv:releasenotes] allowlist_externals = rm deps = -r{toxinidir}/doc/requirements.txt commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html [flake8] show-source = true exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build # We only enable the hacking (H) checks select = H # H301 Black will put commas after imports that can't fit on one line # H405 Multi-line docstrings are fine ignore = H301,H405 [hacking] import_exceptions = collections types typing oslo_limit._i18n
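The enforcer-limit-caching and look-up-endpoint-id-from-keystone release notes above describe the ``cache`` keyword argument accepted by ``Enforcer.__init__()`` and the ``[oslo_limit]`` endpoint lookup options. What follows is a minimal usage sketch, not an excerpt from the library: it assumes the public ``oslo_limit.limit.Enforcer`` API with a usage callback and the ``oslo_limit.exception.ProjectOverLimit`` exception, and it assumes the ``[oslo_limit]`` auth and endpoint options are configured separately. The ``widgets`` resource name, the project ID, and the callback body are hypothetical placeholders.

    from oslo_limit import exception as limit_exceptions
    from oslo_limit import limit


    def usage_callback(project_id, resource_names):
        # Return the current usage for each requested resource in the project.
        # A real service would count its own resources here; the zero counts
        # below are placeholders for illustration only.
        return {name: 0 for name in resource_names}


    # cache=True (the default, per the enforcer-limit-caching release note)
    # keeps limits fetched from keystone cached for the lifetime of this
    # Enforcer object; pass cache=False to re-read limits on every call.
    enforcer = limit.Enforcer(usage_callback, cache=True)

    try:
        # Check whether creating two more hypothetical "widgets" keeps the
        # project within its registered and project limits.
        enforcer.enforce("some-project-id", {"widgets": 2})
    except limit_exceptions.ProjectOverLimit:
        # The requested delta would exceed the limit; reject the request.
        raise

Caching matters most when a single request enforces several resources in sequence, since each uncached ``enforce()`` call otherwise re-reads the limits from keystone, as noted in the release note above.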